sedrickkeh commited on
Commit
e116efc
1 Parent(s): 85e401c

Upload configs.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. configs.yaml +37 -0
configs.yaml ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf16: true
cutoff_len: 512
dataset: llamafactory/alpaca_en
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: true
eval_strategy: epoch
finetuning_type: full
formatting: alpaca
global_batch_size: 512
gradient_accumulation_steps: 8
gradient_checkpointing: true
hub_model_id: mlfoundations-dev/test_run_mini
learning_rate: 2.0e-05
logging_steps: 10
lr_scheduler_type: cosine
max_steps: 3
model_name_or_path: meta-llama/Llama-3.2-1B
neat_packing: true
output_dir: /data4/dcft/experiments/train/checkpoints/test_run_mini
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 8
plot_loss: true
preprocessing_num_workers: 16
push_to_db: false
push_to_hub: true
report_to: wandb
run_name: test_run_mini
save_strategy: epoch
stage: sft
template: alpaca
val_size: 0.05
warmup_ratio: 0.1