mcore_gpt: true
micro_batch_size: 1
global_batch_size: 128
tensor_model_parallel_size: 8
pipeline_model_parallel_size: 1
virtual_pipeline_model_parallel_size: null
encoder_seq_length: 8192
max_position_embeddings: 8192
num_layers: 80
hidden_size: 8192
ffn_hidden_size: 28672
num_attention_heads: 64
init_method_std: 0.02
use_scaled_init_method: true
hidden_dropout: 0.0
attention_dropout: 0.0
ffn_dropout: 0.0
kv_channels: null
apply_query_key_layer_scaling: true
normalization: rmsnorm
layernorm_epsilon: 1.0e-05
do_layer_norm_weight_decay: false
make_vocab_size_divisible_by: 128
pre_process: true
post_process: true
persist_layer_norm: true
bias: false
activation: fast-swiglu
headscale: false
transformer_block_type: pre_ln
openai_gelu: false
normalize_attention_scores: true
position_embedding_type: rope
rotary_percentage: 1.0
attention_type: multihead
share_embeddings_and_output_weights: false
overlap_p2p_comm: false
batch_p2p_comm: true
num_query_groups: 8
tokenizer:
  library: huggingface
  type: meta-llama/Meta-Llama-3-70B
  use_fast: true
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: false
fp16_lm_cross_entropy: false
megatron_amp_O2: true
grad_allreduce_chunk_size_mb: 125
grad_div_ar_fusion: true
gradient_accumulation_fusion: false
bias_activation_fusion: false
bias_dropout_add_fusion: false
masked_softmax_fusion: true
get_attention_mask_from_fusion: true
apply_rope_fusion: false
seed: 1234
resume_from_checkpoint: null
use_cpu_initialization: false
onnx_safe: false
apex_transformer_log_level: 30
gradient_as_bucket_view: false
sync_batch_comm: false
activations_checkpoint_granularity: null
activations_checkpoint_method: null
activations_checkpoint_num_layers: null
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: false
transformer_engine: true
fp8: false
fp8_e4m3: false
fp8_hybrid: true
fp8_margin: 0
fp8_interval: 1
fp8_amax_history_len: 1024
fp8_amax_compute_algo: max
reduce_amax: true
use_emha: false
data:
  chat: true
  chat_prompt_tokens:
    system_turn_start:
    turn_start:
    label_start:
    end_of_turn: ' '
    end_of_name: ' '
  steerlm2_weight_micro_batch_size: 1
  steerlm2_micro_batch_size: 1
  sample: true
  num_workers: 0
  dataloader_type: single
  train_ds:
    file_path: /dataset/output_daring_anteater_2k_v2_full_clean.jsonl
    global_batch_size: 128
    micro_batch_size: 1
    shuffle: true
    memmap_workers: null
    max_seq_length: 4096
    min_seq_length: 1
    drop_last: true
    label_key: output
    add_eos: false
    add_sep: false
    add_bos: false
    truncation_field: input
    index_mapping_dir: /indexmap_dir
    prompt_template: 'System {system message} User {turn 1 user message} Assistant {turn 1 assistant label} {turn 1 assistant message} User {turn 2 user message} Assistant {turn 2 assistant label} {turn 2 assistant message} '
    hf_dataset: true
    truncation_method: right
  validation_ds:
    file_path: /dataset/output_daring_anteater_2k_v2_full_clean.jsonl
    global_batch_size: 128
    micro_batch_size: 1
    shuffle: false
    memmap_workers: null
    max_seq_length: 4096
    min_seq_length: 1
    drop_last: true
    label_key: output
    add_eos: false
    add_sep: false
    add_bos: false
    truncation_field: input
    index_mapping_dir: /indexmap_dir
    prompt_template: 'System {system message} User {turn 1 user message} Assistant {turn 1 assistant label} {turn 1 assistant message} User {turn 2 user message} Assistant {turn 2 assistant label} {turn 2 assistant message} '
    hf_dataset: true
    truncation_method: right
    output_original_text: true
nsys_profile:
  enabled: false
  start_step: 10
  end_step: 10
  ranks:
  - 0
  gen_shape: false
optim:
  name: distributed_fused_adam
  lr: 1e-07
  weight_decay: 0.01
  betas:
  - 0.9
  - 0.98
  sched:
    name: CosineAnnealing
    warmup_steps: 1
    constant_steps: 300
    min_lr: 0.9999e-07
  bucket_cap_mb: 200
  overlap_grad_sync: false
  contiguous_grad_buffer: true
rotary_base: 500000.0
precision: bf16-mixed
target: nemo_aligner.models.nlp.gpt.gpt_sft_model.GPTSFTModel
nemo_version: 1.23.0rc0
answer_only_loss: true
restore_from_path: /models/unpack_70b_daring_anteater_20k_full_1e-7_small_gbs_100
save_nemo_on_validation_end: true
use_flash_attention: null
pipeline_model_parallel_split_rank: 0
peft:
  peft_scheme: none
  restore_from_path: null
  lora_tuning:
    target_modules:
    - attention_qkv
    adapter_dim: 32
    adapter_dropout: 0.0
    column_init_method: xavier
    row_init_method: zero
    layer_selection: null
    weight_tying: false
    position_embedding_strategy: null
inference: {}
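# A minimal sketch (not part of the original config) of loading and inspecting this
# file with OmegaConf, which NeMo uses for its YAML configs. The file name below is
# an assumption; adjust it to wherever this config is stored.
#
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("model_config.yaml")  # hypothetical path to this file
#   # Read a few of the fields defined above.
#   print(cfg.optim.lr, cfg.optim.sched.name)
#   print(cfg.data.train_ds.max_seq_length, cfg.tokenizer.type)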