Create qlora-instruct-70b-dpo.yaml

#12
Files changed (1)
  1. qlora-instruct-70b-dpo.yaml +70 -0
qlora-instruct-70b-dpo.yaml ADDED
@@ -0,0 +1,70 @@
+ base_model: meta-llama/Meta-Llama-3-70B-Instruct
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ save_safetensors: true
+
+ rl: dpo
+ chat_template: chatml
+ datasets:
+   - path: argilla/ultrafeedback-binarized-preferences
+     split: train
+     type: chatml.argilla
+
+ dataset_prepared_path:
+ val_set_size: 0.0
+ output_dir: ./MaziyarPanahi/Llama-3-70B-Instruct-DPO-v0.4
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 3190
+ sample_packing: true
+ pad_to_sequence_len: false
+
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 2
+ num_epochs: 3
+ optimizer: paged_adamw_32bit
+ lr_scheduler: cosine
+ learning_rate: 5e-7
+ train_on_inputs: false
+ group_by_length: false
+
+ bf16: auto
+ fp16:
+ tf32:
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ warmup_steps: 100
+ evals_per_epoch: 1
+ eval_table_size:
+ eval_table_max_new_tokens: 128
+ saves_per_epoch: 4
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ special_tokens:
+   pad_token: "<|end_of_text|>"
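
Usage note (a sketch, not part of this PR): a config in this shape is consumed by Axolotl's training CLI. Assuming the file is saved as qlora-instruct-70b-dpo.yaml in the working directory, the invocation documented in the Axolotl README is roughly:

    # train the DPO QLoRA adapter described by this config
    accelerate launch -m axolotl.cli.train qlora-instruct-70b-dpo.yaml

Because adapter: qlora is set, training writes a LoRA adapter to output_dir rather than full merged weights. Axolotl ships a merge entry point for that; the --lora_model_dir value below is an assumption pointing at the output_dir from this config, so adjust it if the trained adapter lands elsewhere:

    # merge the trained adapter back into the 4-bit-quantized base model
    # (--lora_model_dir assumed to match output_dir above)
    python -m axolotl.cli.merge_lora qlora-instruct-70b-dpo.yaml \
        --lora_model_dir="./MaziyarPanahi/Llama-3-70B-Instruct-DPO-v0.4"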