Delta-Vector committed on
Commit cd323f9
1 Parent(s): 4633392

Update Qwen7B.yaml

Files changed (1)
  1. Qwen7B.yaml (+29 -8)
Qwen7B.yaml CHANGED
@@ -9,19 +9,31 @@ load_in_4bit: false
 strict: false
 
 datasets:
-  - path:
+  - path: PocketDoc/Dans-MemoryCore-CoreCurriculum-Small
+    type: sharegpt
+    conversation: chatml
+  - path: AquaV/Chemical-Biological-Safety-Applications-Sharegpt
+    type: sharegpt
+    conversation: chatml
+  - path: AquaV/Energetic-Materials-Sharegpt
     type: sharegpt
     conversation: chatml
   - path: lodrick-the-lafted/NopmWritingStruct
     type: sharegpt
     conversation: chatml
+  - path: NewEden/Claude-Instruct-5k
+    type: sharegpt
+    conversation: chatml
   - path: kalomaze/Opus_Instruct_25k
     type: sharegpt
     conversation: chatml
   - path: kalomaze/Opus_Instruct_3k
     type: sharegpt
     conversation: chatml
-  - path: NewEden/Claude-Data-Anon-Killed
+  - path: NewEden/Stheno-Data-filtered-8k-subset
+    type: sharegpt
+    conversation: chatml
+  - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
     type: sharegpt
     conversation: chatml
   - path: PJMixers/lodrick-the-lafted_OpusStories-ShareGPT
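
Every entry in this hunk is loaded as type: sharegpt and rendered with the chatml conversation template. As a rough, hypothetical illustration of what that pairing means (Axolotl's real prompt strategies also handle loss masking, truncation, and special-token ids), a ShareGPT record is a list of from/value turns flattened into <|im_start|> / <|im_end|> blocks:

# Hypothetical sketch of the sharegpt + chatml pairing; not Axolotl's
# actual loader. ROLE_MAP and sharegpt_to_chatml are invented names.
ROLE_MAP = {"system": "system", "human": "user", "gpt": "assistant"}

def sharegpt_to_chatml(conversations):
    parts = []
    for turn in conversations:
        role = ROLE_MAP[turn["from"]]
        parts.append(f"<|im_start|>{role}\n{turn['value']}<|im_end|>")
    return "\n".join(parts) + "\n"

record = [
    {"from": "human", "value": "Write one sentence about trains."},
    {"from": "gpt", "value": "Steel hums on cold rails at dawn."},
]
print(sharegpt_to_chatml(record))
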
@@ -32,7 +44,7 @@ chat_template: chatml
 dataset_prepared_path:
 val_set_size: 0.05
 output_dir: ./outputs/out
-sequence_len: 32768
+sequence_len: 16384
 sample_packing: true
 eval_sample_packing: true
 pad_to_sequence_len: true
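
sequence_len drops from 32768 to 16384. With sample packing on, this is the token budget of each packed micro batch; combined with the gradient_accumulation_steps change below, the tokens seen per optimizer step fall 4x. A quick back-of-the-envelope check (num_gpus = 1 is an assumption, the config itself is device-agnostic):

# Tokens per optimizer step = sequence_len * micro_batch_size
#                             * gradient_accumulation_steps * num_gpus.
def tokens_per_step(sequence_len, micro_batch_size, grad_accum, num_gpus=1):
    return sequence_len * micro_batch_size * grad_accum * num_gpus

before = tokens_per_step(32768, 1, 64)   # 2,097,152 tokens/step
after = tokens_per_step(16384, 1, 32)    #   524,288 tokens/step
print(f"before: {before:,}  after: {after:,}  ratio: {before // after}x")
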
@@ -45,13 +57,22 @@ lora_dropout:
 lora_target_linear: true
 lora_fan_in_fan_out:
 
-wandb_project: Magnum-9b
+wandb_project: henbane 7b
 wandb_entity:
 wandb_watch:
-wandb_name: 123-9b
+wandb_name: henbane 7b
 wandb_log_model:
 
-gradient_accumulation_steps: 64
+
+plugins:
+  - axolotl.integrations.liger.LigerPlugin
+liger_rope: true
+liger_rms_norm: true
+liger_swiglu: true
+liger_fused_linear_cross_entropy: true
+
+
+gradient_accumulation_steps: 32
 micro_batch_size: 1
 num_epochs: 2
 optimizer: adamw_torch
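
Two things change here besides the W&B names: the Liger plugin swaps in fused Triton kernels for RoPE, RMSNorm, SwiGLU, and the linear-plus-cross-entropy head, which are meant to cut memory use and raise throughput, and gradient_accumulation_steps is halved to 32. With micro_batch_size: 1, accumulation is the sole source of effective batch size. A self-contained toy of the pattern (Axolotl delegates the real loop to the Hugging Face Trainer; the model and data below are stand-ins):

import torch

# Toy gradient-accumulation loop: 32 single-sample backward passes are
# averaged into one optimizer step, emulating an effective batch of 32.
accum_steps = 32
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

optimizer.zero_grad()
for _ in range(accum_steps):
    x, y = torch.randn(1, 8), torch.randn(1, 1)       # one micro batch
    loss = torch.nn.functional.mse_loss(model(x), y)
    (loss / accum_steps).backward()                   # scale so grads average
optimizer.step()                                      # one update per 32 micro batches
optimizer.zero_grad()
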
@@ -74,9 +95,9 @@ logging_steps: 1
 xformers_attention:
 flash_attention: true
 
-warmup_ratio: 0.05
+warmup_steps: 10
 evals_per_epoch: 4
 saves_per_epoch: 1
 debug:
-weight_decay: 0.0
+weight_decay: 0.5
 special_tokens:
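
The schedule moves from a proportional warmup_ratio: 0.05 to a fixed 10-step warmup, and weight_decay jumps from 0.0 to 0.5, a strong decoupled-decay setting for AdamW. A minimal sketch of that warmup shape on toy parameters, warming linearly into a flat learning rate (the lr value is a toy choice, and the actual schedule is built by the Hugging Face Trainer):

import torch

# Linear warmup over 10 steps, mirroring warmup_steps: 10 and
# weight_decay: 0.5 from this commit. Gradients omitted in this sketch,
# so optimizer.step() is a no-op; only the LR trajectory is shown.
param = torch.nn.Parameter(torch.zeros(4))
optimizer = torch.optim.AdamW([param], lr=1e-5, weight_decay=0.5)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lambda step: min(1.0, (step + 1) / 10)
)

for step in range(12):
    optimizer.step()
    scheduler.step()
    print(step, optimizer.param_groups[0]["lr"])
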
 