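# Training-arguments dump for fine-tuning meta-llama/Llama-2-7b-chat-hf
# (presumably from an open-instruct-style finetune script, judging by the
# key names and the train_file below).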
checkpointing_steps: '50'
config_name: null
dataset_config_name: null
dataset_name: null
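# Effective batch size: per_device_train_batch_size (1, below) x
# gradient_accumulation_steps (16) = 16 per device; multiply by the device
# count for the global batch size on multi-GPU runs.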
gradient_accumulation_steps: 16
learning_rate: 2.0e-05
logging_steps: 1
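# use_lora is false (below), so these LoRA hyperparameters and
# save_merged_lora_model are presumably ignored on this run.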
lora_alpha: 16
lora_dropout: 0.1
lora_rank: 64
low_cpu_mem_usage: false
lr_scheduler_type: cosine
max_seq_length: 2048
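# In accelerate-based trainers, max_train_steps typically takes precedence
# over num_train_epochs when both are set (an assumption about this script).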
max_train_steps: 550
model_name_or_path: meta-llama/Llama-2-7b-chat-hf
num_train_epochs: 2
output_dir: multi_passage_w_qa
overwrite_cache: false
per_device_train_batch_size: 1
preprocessing_num_workers: 16
report_to: tensorboard
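# Resumes from the step-100 checkpoint under output_dir (checkpoints are
# written every 50 steps per checkpointing_steps above).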
resume_from_checkpoint: multi_passage_w_qa/step_100
save_merged_lora_model: false
seed: null
tokenizer_name: meta-llama/Llama-2-7b-chat-hf
train_file: /gscratch/h2lab/abhikam/llm-factuality/data/training/open-instruct-multi-qa.json
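# use_slow_tokenizer requests the Python (non-fast) tokenizer, which some
# Llama fine-tuning scripts prefer for tokenization consistency (assumption).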
use_flash_attn: true
use_lora: false
use_slow_tokenizer: true
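# warmup_ratio: 3% of 550 max steps = 16.5, i.e. about 16 warmup steps
# (exact rounding depends on the script).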
warmup_ratio: 0.03
weight_decay: 0.0
with_tracking: true