# nvidia/Llama-3.1-Nemotron-70B-Reward / model_config.yaml
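# NeMo-Aligner configuration for a regression-style reward model built on
# Llama-3.1-70B-Instruct (see the `target` class at the bottom of this file).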
mcore_gpt: true
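# Parallelism and batching: 8-way tensor x 2-way pipeline parallelism
# (16 GPUs per model replica); with micro_batch_size 1, the global batch
# of 128 is accumulated across data-parallel replicas and micro-batches.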
micro_batch_size: 1
global_batch_size: 128
tensor_model_parallel_size: 8
pipeline_model_parallel_size: 2
virtual_pipeline_model_parallel_size: null
encoder_seq_length: 4096
max_position_embeddings: 4096
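# Transformer dimensions matching the Llama-3.1-70B architecture: 80 layers,
# 8192 hidden size, 28672 FFN size, 64 attention heads with 8 query groups
# (grouped-query attention; see num_query_groups below).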
num_layers: 80
hidden_size: 8192
ffn_hidden_size: 28672
num_attention_heads: 64
init_method_std: 0.02
use_scaled_init_method: true
hidden_dropout: 0.0
attention_dropout: 0.0
ffn_dropout: 0.0
kv_channels: null
apply_query_key_layer_scaling: true
normalization: rmsnorm
layernorm_epsilon: 1.0e-05
do_layer_norm_weight_decay: false
make_vocab_size_divisible_by: 128
pre_process: true
post_process: true
persist_layer_norm: true
bias: false
activation: fast-swiglu
headscale: false
transformer_block_type: pre_ln
openai_gelu: false
normalize_attention_scores: true
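# Rotary position embeddings with Llama 3.1's base frequency of 500000
# (rotary_base, set near the end of this file); scale_positional_embedding
# enables the Llama 3.1 RoPE scaling.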
position_embedding_type: rope
rotary_percentage: 1.0
attention_type: multihead
share_embeddings_and_output_weights: false
overlap_p2p_comm: false
batch_p2p_comm: true
num_query_groups: 8
scale_positional_embedding: true
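# Tokenizer is loaded from the Hugging Face hub; the reward model reuses the
# Meta-Llama-3.1-70B-Instruct tokenizer.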
tokenizer:
  library: huggingface
  type: meta-llama/Meta-Llama-3.1-70B-Instruct
  use_fast: true
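# Mixed precision: training runs in bf16 with Megatron O2-level AMP; the
# loss-scaling knobs below (native_amp_*, hysteresis) presumably only take
# effect under fp16 and are inert here.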
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: false
fp16_lm_cross_entropy: false
megatron_amp_O2: true
grad_allreduce_chunk_size_mb: 125
grad_div_ar_fusion: true
gradient_accumulation_fusion: false
bias_activation_fusion: false
bias_dropout_add_fusion: false
masked_softmax_fusion: true
get_attention_mask_from_fusion: true
apply_rope_fusion: false
seed: 1234
resume_from_checkpoint: null
use_cpu_initialization: false
onnx_safe: false
apex_transformer_log_level: 30
gradient_as_bucket_view: true
sync_batch_comm: false
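# Full activation recomputation: activations are checkpointed uniformly,
# one layer per checkpoint block, trading extra compute for memory.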
activations_checkpoint_granularity: full
activations_checkpoint_method: uniform
activations_checkpoint_num_layers: 1
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: false
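# Transformer Engine is enabled, but FP8 execution is off (fp8: false); the
# fp8_* recipe below (hybrid E4M3/E5M2 formats, amax history) should only
# apply if fp8 is switched on.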
transformer_engine: true
fp8: false
fp8_e4m3: false
fp8_hybrid: true
fp8_margin: 0
fp8_interval: 1
fp8_amax_history_len: 1024
fp8_amax_compute_algo: max
reduce_amax: true
use_emha: false
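# Data: JSONL files with explicit splits; note that validation and test
# point at the same file.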
data:
  index_mapping_dir: null
  data_impl: jsonl
  splits_string: null
  seq_length: 4096
  skip_warmup: true
  num_workers: 0
  dataloader_type: single
  reset_position_ids: false
  reset_attention_mask: false
  eod_mask_loss: false
  validation_drop_last: true
  no_seqlen_plus_one_input_tokens: false
  pad_samples_to_global_batch_size: false
  shuffle_documents: true
  data_prefix:
    train:
    - /dataset/train.jsonl
    validation:
    - /dataset/val.jsonl
    test:
    - /dataset/val.jsonl
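# Optional Nsight Systems profiling hooks (disabled here).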
nsys_profile:
  enabled: false
  start_step: 10
  end_step: 10
  ranks:
  - 0
  gen_shape: false
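# Optimizer: distributed fused Adam, lr 1e-6 with betas (0.9, 0.98). Since
# min_lr equals lr, the cosine schedule is effectively constant after the
# 10 warmup steps; training runs for 317 steps total.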
optim:
  name: distributed_fused_adam
  lr: 1.0e-06
  weight_decay: 0.1
  betas:
  - 0.9
  - 0.98
  sched:
    name: CosineAnnealing
    warmup_steps: 10
    constant_steps: 0
    min_lr: 1.0e-06
    max_steps: 317
  bucket_cap_mb: 200
  overlap_grad_sync: false
  contiguous_grad_buffer: true
rotary_base: 500000.0
precision: bf16
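# Regression reward head: predicts 9 attribute scores per response rather
# than a single scalar; attributes are not merged during training
# (merge_attributes: false), and loss_mask_val is presumably the label value
# that excludes an attribute from the loss.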
reward_model_type: regression
regression:
  num_attributes: 9
  merge_attributes: false
  attribute_weights: null
  loss_mask_val: -100
output_sequence: false
use_avg_pool: false
force_head_dtype: float32
target: nemo_aligner.models.nlp.gpt.megatron_gpt_regression_reward_model.MegatronGPTRegressionRewardModel
nemo_version: 2.0.0rc2
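# Illustrative launch sketch (hypothetical command and paths; assumes the
# NeMo-Aligner examples layout, where this file mirrors the `model` section
# of the reward-model training config):
#   python examples/nlp/gpt/train_reward_model.py \
#     pretrained_checkpoint.restore_from_path=<llama-3.1-70b.nemo> \
#     model.reward_model_type=regression \
#     model.regression.num_attributes=9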