```yaml
adam_beta1: 0.9
adam_beta2: 0.999
bf16: true
cutoff_len: 2048
dataset: mlfoundations-dev/oh_v3-1_only_gpt4_llm
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: false
eval_strategy: epoch
finetuning_type: full
formatting: sharegpt
global_batch_size: 512
gradient_accumulation_steps: 8
gradient_checkpointing: true
hub_model_id: mlfoundations-dev/oh_v3-1_only_gpt4_llm
include_hp: dcft/train/hp_settings/hritik.yaml
learning_rate: 5.0e-06
logging_steps: 10
lr_scheduler_type: constant
max_grad_norm: 1
messages: conversations
model_name_or_path: meta-llama/Meta-Llama-3.1-8B
neat_packing: true
num_train_epochs: 3.0
output_dir: /dev/shm/experiments/train/checkpoints/oh_v3-1_only_gpt4_llm
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 8
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
run_name: oh_v3-1_only_gpt4_llm
save_strategy: epoch
stage: sft
template: llama3
val_size: 0.05
warmup_ratio: 0.1
warmup_steps: 1738
weight_decay: 0.1
```