Upload training_args.yaml with huggingface_hub
training_args.yaml +30 -0
training_args.yaml
ADDED
@@ -0,0 +1,30 @@
+bf16: true
+cutoff_len: 2048
+dataset: limo
+dataset_dir: data
+ddp_timeout: 180000000
+do_train: true
+finetuning_type: full
+flash_attn: auto
+gradient_accumulation_steps: 8
+include_num_input_tokens_seen: true
+learning_rate: 2.0e-05
+logging_steps: 5
+lr_scheduler_type: cosine
+max_grad_norm: 1.0
+max_samples: 100000
+model_name_or_path: Qwen/Qwen2.5-1.5B
+num_train_epochs: 15.0
+optim: adamw_torch
+output_dir: saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10
+packing: false
+per_device_train_batch_size: 2
+plot_loss: true
+preprocessing_num_workers: 16
+report_to:
+- wandb
+save_steps: 100
+stage: sft
+template: default
+trust_remote_code: true
+warmup_steps: 0
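Keys such as `stage: sft`, `finetuning_type: full`, `template`, `dataset_dir`, and `cutoff_len` match the training-argument schema used by LLaMA-Factory, and the timestamped `output_dir` follows the naming pattern of its web UI. Assuming that toolchain, the same file can be passed back to the trainer with `llamafactory-cli train training_args.yaml` to reproduce the run: full-parameter SFT of Qwen/Qwen2.5-1.5B on the `limo` dataset for 15 epochs at an effective per-device batch size of 16 (2 per device × 8 gradient-accumulation steps).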
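The commit title indicates the file was pushed with the `huggingface_hub` client. A minimal sketch of that call, with a hypothetical `repo_id` since the target repo is not shown in this diff:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN

api.upload_file(
    path_or_fileobj="training_args.yaml",     # local file to push
    path_in_repo="training_args.yaml",        # destination path inside the repo
    repo_id="your-username/your-model-repo",  # hypothetical; not part of the diff
    commit_message="Upload training_args.yaml with huggingface_hub",
)
```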