Delta-Vector committed
Commit 0726b86 · verified · 1 parent: e26029a

Upload HebaneGodKnows.yml

Files changed (1)
  1. HebaneGodKnows.yml +112 -0
HebaneGodKnows.yml ADDED
@@ -0,0 +1,112 @@
+ base_model: arcee-ai/Llama-3.1-SuperNova-Lite
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ #trust_remote_code: true
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ liger_swiglu: true
+ liger_fused_linear_cross_entropy: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: Gryphe/Sonnet3.5-SlimOrcaDedupCleaned
+     type: chat_template
+   - path: Nitral-AI/Cybersecurity-ShareGPT
+     type: chat_template
+   - path: Nitral-AI/Medical_Instruct-ShareGPT
+     type: chat_template
+   - path: Nitral-AI/Olympiad_Math-ShareGPT
+     type: chat_template
+   - path: anthracite-org/kalo_opus_misc_240827
+     type: chat_template
+   - path: NewEden/Claude-Instruct-5k
+     type: chat_template
+   - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
+     type: chat_template
+   - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+     type: chat_template
+   - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
+     type: chat_template
+   - path: Epiculous/SynthRP-Gens-v1.1-Filtered-n-Cleaned
+     type: chat_template
+   - path: anthracite-org/kalo_misc_part2
+     type: chat_template
+   - path: anthracite-org/kalo_misc_part2
+     type: chat_template
+   - path: Nitral-AI/Creative_Writing-ShareGPT
+     type: chat_template
+   - path: NewEden/Gryphe-Sonnet3.5-Charcard-Roleplay-unfiltered
+     type: chat_template
+
+ chat_template: llama3
+ shuffle_merged_datasets: true
+ default_system_message: "You are an assistant that responds to the user."
+ dataset_prepared_path: prepared_dataset_memorycore
+ val_set_size: 0.0
+ output_dir: ./henbane-8b-r3
+
+ sequence_len: 8192
+ sample_packing: true
+ eval_sample_packing: false
+ pad_to_sequence_len:
+
+ adapter:
+ lora_model_dir:
+ lora_r:
+ lora_alpha:
+ lora_dropout:
+ lora_target_linear:
+ lora_fan_in_fan_out:
+
+ wandb_project: henbane-8b-r3
+ wandb_entity:
+ wandb_watch:
+ wandb_name: henbane-8b-r3
+ wandb_log_model:
+
+ gradient_accumulation_steps: 128
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ #learning_rate: 3e-5
+ learning_rate: 1e-5
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ eager_attention: false
+
+ warmup_steps: 5
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json
+ weight_decay: 0.05
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <|finetune_right_pad_id|>
+   eos_token: <|eot_id|>
+
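For reference, a config like this is normally consumed by Axolotl's standard CLI entry points. The commands below are a minimal sketch, not part of this commit: they assume Axolotl is installed in the environment and that the repo layout matches the deepspeed path in the config (/workspace/axolotl), with this file saved as HebaneGodKnows.yml in the working directory.

    # optional: tokenize and cache the datasets first (writes to dataset_prepared_path)
    python -m axolotl.cli.preprocess HebaneGodKnows.yml

    # launch training; accelerate handles the multi-GPU / DeepSpeed ZeRO-2 setup
    accelerate launch -m axolotl.cli.train HebaneGodKnows.yml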