ehartford committed on
Commit
5c32e51
1 Parent(s): 8f8ee79

Upload folder using huggingface_hub

Browse files
Files changed (1)
  1. configs/dolphin-dpo.yml +82 -0
configs/dolphin-dpo.yml ADDED
@@ -0,0 +1,82 @@
+ base_model: cognitivecomputations/dolphin-2.6-mistral-7b
+ model_type: MistralForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_mistral_derived_model: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ rl: true
+ datasets:
+   - path: argilla/ultrafeedback-binarized-preferences-cleaned
+     split: train
+     type: ultra_apply_chatml
+   - path: unalignment/toxic-dpo-v0.1
+     split: train
+     type: toxic_apply_chatml
+
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.0
+ output_dir: /workspace/dolphin-2.6-mistral-7b-dpo
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 2048
+ sample_packing: false
+ pad_to_sequence_len: false
+
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+
+ wandb_project: dolphin
+ wandb_entity:
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 4
+ num_epochs: 1
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 2e-5
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: true
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ eval_steps:
+ eval_table_size:
+ eval_table_max_new_tokens: 128
+ save_steps: 239
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
+ save_safetensors: true
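
The file above is an Axolotl training config; a run would typically be launched with Axolotl's trainer CLI (for example, accelerate launch -m axolotl.cli.train configs/dolphin-dpo.yml). The type: ultra_apply_chatml and toxic_apply_chatml entries name prompt transforms that map each preference row into a ChatML prompt with paired chosen/rejected completions for DPO. The sketch below is only an illustration of that mapping, with assumed column names (prompt, chosen, rejected); it is not Axolotl's actual implementation.

    # Illustrative sketch of a ChatML DPO transform; column names are assumptions.
    def apply_chatml(row: dict) -> dict:
        # Wrap the user turn in ChatML and open the assistant turn, so the
        # chosen/rejected completions differ only after the shared prompt.
        prompt = (
            "<|im_start|>user\n"
            f"{row['prompt']}<|im_end|>\n"
            "<|im_start|>assistant\n"
        )
        return {
            "prompt": prompt,
            "chosen": f"{row['chosen']}<|im_end|>",
            "rejected": f"{row['rejected']}<|im_end|>",
        }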
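
After training, output_dir holds a QLoRA adapter rather than merged weights (adapter: qlora with save_safetensors: true). Below is a minimal sketch of attaching that adapter to the base model with PEFT, assuming the adapter files sit directly in the configured output_dir:

    import torch
    from peft import PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Base model named in the config; bfloat16 matches bf16: true.
    base = AutoModelForCausalLM.from_pretrained(
        "cognitivecomputations/dolphin-2.6-mistral-7b",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "cognitivecomputations/dolphin-2.6-mistral-7b"
    )

    # output_dir from the config (a path on the training host).
    model = PeftModel.from_pretrained(base, "/workspace/dolphin-2.6-mistral-7b-dpo")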