Delta-Vector
committed on
Upload 10 files
- Holland4bv1.yml +88 -0
- InterLM.yaml +85 -0
- Qwen7B.yaml +79 -0
- customgemma2.py +154 -0
- gemma2FFT.yaml +74 -0
- gemmy.yaml +69 -0
- magstral.yaml +63 -0
- mergeddatasets4b.yml +71 -0
- sdprompter.yaml +92 -0
- tinygemma.yaml +68 -0
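
These are axolotl-style training configs plus one custom prompt-tokenizing strategy. As a minimal sketch of how one of the uploaded configs might be launched (assuming axolotl and accelerate are installed and that axolotl exposes its usual `axolotl.cli.train` entry point; nothing in this upload specifies the launch command):

# Minimal sketch: kick off a fine-tune from one of the uploaded configs.
# Assumes `accelerate` and `axolotl` are installed; the module path
# axolotl.cli.train is axolotl's commonly documented entry point and is
# an assumption here, not something stated in this upload.
import subprocess

subprocess.run(
    ["accelerate", "launch", "-m", "axolotl.cli.train", "Holland4bv1.yml"],
    check=True,
)
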
Holland4bv1.yml
ADDED
@@ -0,0 +1,88 @@
base_model: IntervitensInc/Llama-3.1-Minitron-4B-Width-Base-chatml
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: NewEden/Gryphe-3.5-16k-Subset
    type: sharegpt
    conversation: chatml
  - path: Epiculous/Synthstruct-Gens-v1.1-Filtered-n-Cleaned
    type: sharegpt
    conversation: chatml
  - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
    type: sharegpt
    conversation: chatml
  - path: PJMixers/lodrick-the-lafted_OpusStories-ShareGPT
    type: sharegpt
    conversation: chatml

chat_template: chatml

val_set_size: 0.01
output_dir: ./outputs/out

adapter:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:

sequence_len: 16384
# sequence_len: 32768
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true

wandb_project: Ohashi4b
wandb_entity:
wandb_watch:
wandb_name: Ohashi4b
wandb_log_model:

gradient_accumulation_steps: 32
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00002
weight_decay: 0.05

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1

debug:
deepspeed:
fsdp:
fsdp_config:

special_tokens:
  pad_token: <|finetune_right_pad_id|>
InterLM.yaml
ADDED
@@ -0,0 +1,85 @@
base_model: internlm/internlm2_5-1_8b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: lodrick-the-lafted/NopmWritingStruct
    type: sharegpt
    conversation: chatml
  - path: NewEden/Kalo-Opus-Instruct-25K-Refusal-killed
    type: sharegpt
    conversation: chatml
  - path: NewEDen/Claude-Data-Anon-Killed
    type: sharegpt
    conversation: chatml
  - path: MangoHQ/Gryphe-3.5-16k-Subset
    type: sharegpt
    conversation: chatml
  - path: PJMixers/lodrick-the-lafted_OpusStories-ShareGPT
    type: sharegpt
    conversation: chatml
  - path: MangoHQ/opus-sharegpt2
    type: sharegpt
    conversation: chatml
  - path: MangoHQ/opus-sharegpt1
    type: sharegpt
    conversation: chatml

chat_template: chatml
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/out
sequence_len: 4096
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project: Aleah-1.8B
wandb_entity:
wandb_watch:
wandb_name: Aleah1.8BV2
wandb_log_model:

gradient_accumulation_steps: 64
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio:
evals_per_epoch: 4
saves_per_epoch: 1
debug:
weight_decay: 0.0
special_tokens:
Qwen7B.yaml
ADDED
@@ -0,0 +1,79 @@
base_model: Qwen/Qwen2-7B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: lodrick-the-lafted/NopmWritingStruct
    type: sharegpt
    conversation: chatml
  - path: kalomaze/Opus_Instruct_25k
    type: sharegpt
    conversation: chatml
  - path: kalomaze/Opus_Instruct_3k
    type: sharegpt
    conversation: chatml
  - path: NewEden/Claude-Data-Anon-Killed
    type: sharegpt
    conversation: chatml
  - path: PJMixers/lodrick-the-lafted_OpusStories-ShareGPT
    type: sharegpt
    conversation: chatml

chat_template: chatml
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/out
sequence_len: 32768
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project: Magnum-9b
wandb_entity:
wandb_watch:
wandb_name: 123-9b
wandb_log_model:

gradient_accumulation_steps: 64
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.05
evals_per_epoch: 4
saves_per_epoch: 1
debug:
weight_decay: 0.0
special_tokens:
customgemma2.py
ADDED
@@ -0,0 +1,154 @@
"""Module containing the CustomGemma2PromptTokenizingStrategy class"""

# Import necessary modules and functions
import copy
import logging
from collections import defaultdict
from typing import Generator, List, Tuple

# Import from axolotl package
from axolotl.prompt_tokenizers import (
    PromptTokenizingStrategy,
    parse_tokenized_to_result,
    tokenize_prompt_default,
)

# Set up logging
LOG = logging.getLogger("axolotl")

# Define a constant token ID to ignore
IGNORE_TOKEN_ID = -100


class CustomGemma2PromptTokenizingStrategy(PromptTokenizingStrategy):
    """
    Tokenizing strategy for CustomGemma2.
    """

    def __init__(self, prompter, tokenizer, *args, **kwargs):
        # Call the superclass' constructor
        super().__init__(prompter, tokenizer, *args, **kwargs)

    def tokenize_prompt(self, prompt):
        # Tokenize the prompt based on its conversations
        result, current_len = tokenize_prompt_default()

        # We don't want to remove the BOS token for the first turn
        strip_bos = False

        # Sometimes it gets named 'conversations' and other times 'conversation'
        if "conversations" in prompt:
            conversation_name = "conversations"
        elif "conversation" in prompt:
            conversation_name = "conversation"
        else:
            LOG.warning("sample does not contain 'conversations' or 'conversation'")
            exit()

        # Iterate over each conversation turn in the prompt
        num_turns = len(prompt[conversation_name])
        for i, turn in enumerate(prompt[conversation_name]):
            # Strip BOS token and add a new line to the beginning if it's not the first turn
            if i == 0:
                strip_bos = False
                add_new_line = ""
            else:
                strip_bos = True
                add_new_line = "\n"

            # Check if this is the last turn, so we know to add the EOS token
            if i == num_turns - 1:
                end_of_text = True
            else:
                end_of_text = False

            # Get correct roles and messages
            sharegpt_from, sharegpt_value = turn["from"].strip(), turn["value"].strip()
            if sharegpt_from == "system":
                role_name = "system"
            elif sharegpt_from == "human":
                role_name = "user"
            elif sharegpt_from == "human-chat":
                role_name = "user"
                sharegpt_value = f"{turn['name'].strip()}: {sharegpt_value}"
            elif sharegpt_from == "gpt":
                role_name = "model"
            elif sharegpt_from == "gpt-chat":
                role_name = "model"
                sharegpt_value = f"{turn['name'].strip()}: {sharegpt_value}"
            else:
                LOG.warning(f"'from' contains an unhandled string: {sharegpt_from}")
                exit()

            # Get tokens which will be masked out if using train_on_inputs: false
            prefix = self._tokenize(
                f"{add_new_line}<start_of_turn>{role_name}\n",
                add_eos_token=False,
                strip_bos_token=strip_bos,
            )

            # Get entire tokenized turn
            res = self._tokenize(
                f"{add_new_line}<start_of_turn>{role_name}\n"
                f"{sharegpt_value.strip()}<end_of_turn>",
                add_eos_token=end_of_text,
                strip_bos_token=strip_bos,
            )

            # Handle masked user turn
            if (
                self.train_on_inputs is False
                and (
                    sharegpt_from == "system"
                    or sharegpt_from == "human"
                    or sharegpt_from == "human-chat"
                )
            ):
                labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
            # Handle partially masked model turn
            elif (
                self.train_on_inputs is False
                and (
                    sharegpt_from == "gpt"
                    or sharegpt_from == "gpt-chat"
                )
            ):
                labels = (
                    [IGNORE_TOKEN_ID] * len(prefix["input_ids"])  # Mask the prefix
                    + [*copy.deepcopy(res["input_ids"])][len(prefix["input_ids"]):]
                )
            # Handle unmasked turn
            else:
                labels = res["input_ids"]

            # Parse tokenized result and update current length
            result, current_len = parse_tokenized_to_result(
                result,
                current_len,
                res,
                labels,
                pad_token_id=self.tokenizer.pad_token_id,
            )

        return result


# TODO: Remove this as it doesn't get used
class CustomGemma2Prompter:
    """
    Prompter for CustomGemma2.
    """

    def __init__(self, *args, **kwargs):
        # Constructor does nothing
        pass


# Function to load the CustomGemma2PromptTokenizingStrategy
def load(tokenizer, cfg):
    return CustomGemma2PromptTokenizingStrategy(
        CustomGemma2Prompter(),  # TODO: Remove this as it doesn't get used
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len
    )
gemma2FFT.yaml
ADDED
@@ -0,0 +1,74 @@
base_model: google/gemma-2-9b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

chat_template: gemma
train_on_eos: true
datasets:
  - path: NewEden/Kalo-Opus-Instruct-22k-Refusal-Murdered
    type: chat_template
    chat_template: gemma
    drop_system_message: true
  - path: NewEden/Gryphe-3.5-16k-Subset
    type: chat_template
    chat_template: gemma
    drop_system_message: true
  - path: Epiculous/Synthstruct-Gens-v1-Filtered-n-Cleaned
    type: chat_template
    chat_template: gemma
    drop_system_message: true

val_set_size: 0.02
output_dir: ./outputs/out

sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

wandb_project: Magnum9B
wandb_entity:
wandb_watch:
wandb_name: Magnum9B
wandb_log_model:


gradient_accumulation_steps: 12
micro_batch_size: 1
num_epochs: 1
#optimizer: adamw_bnb_8bit
optimizer: paged_adamw_8bit
lr_scheduler: cosine
#learning_rate: 1e-5
learning_rate: 8e-6

train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
deepspeed: ./deepspeed_configs/zero3_bf16.json

warmup_steps: 15
evals_per_epoch: 0
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 5
save_total_limit: 3
debug:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
gemmy.yaml
ADDED
@@ -0,0 +1,69 @@
base_model: IntervitensInc/gemma-2-9b-chatml
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

# huggingface repo
datasets:
  - path: NewEden/Kalo-Opus-Instruct-25K-Refusal-killed
    type: chatml
  - path: NewEden/Gryphe-3.5-16k-Subset
    type: chatml

val_set_size: 0.05
output_dir: ./outputs/out

adapter: lora
peft_use_rslora: true
lora_r: 32
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true

sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

wandb_project: magnum 9b
wandb_entity:
wandb_watch:
wandb_name: magnum 9b inst
wandb_log_model:


gradient_accumulation_steps: 32
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed: zero2.json
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
magstral.yaml
ADDED
@@ -0,0 +1,63 @@
base_model: mistralai/Mistral-7B-v0.3
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: MangoHQ/Kalo-Opus-Instruct-22k-Refusal-Murdered
    type: sharegpt
  - path: MangoHQ/Gryphe-3.5-16k-Subset
    type: sharegpt
  - path: Epiculous/Synthstruct-Gens-v1-Filtered-n-Cleaned
    type: sharegpt

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/out

sequence_len: 16384
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project: Magstral 7B
wandb_entity:
wandb_watch:
wandb_name: Magstral 7B
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
mergeddatasets4b.yml
ADDED
@@ -0,0 +1,71 @@
base_model: IntervitensInc/Llama-3.1-Minitron-4B-Width-Base-chatml
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: Edens-Gate/jellymazeisacuteandfunnybear
    type: sharegpt
    conversation: chatml

chat_template: chatml

val_set_size: 0.01
output_dir: ./outputs/out

adapter:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:

sequence_len: 16384
# sequence_len: 32768
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

wandb_project: tinymagnumr6
wandb_entity:
wandb_watch:
wandb_name: tinymagnumr6
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000003
weight_decay: 0.05

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1

debug:
deepspeed:
fsdp:
fsdp_config:

special_tokens:
  pad_token: <|finetune_right_pad_id|>
sdprompter.yaml
ADDED
@@ -0,0 +1,92 @@
base_model: MangyMango/Qwen-1.5B-Claude
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: NewEden/CivitAI-Prompts
    # type:
    #   system_prompt: ""
    #   system_format: "<|im_start|>system\n{system}<|im_end|>\n"
    #   field_system: instruction
    #   field_instruction: input
    #   field_input: ""
    #   field_output: output
    #   no_input_format: "<|im_start|>user\n{instruction}<|im_end|>\n<|im_start|>assistant\n"

    #   system_prompt: ""
    #   field_instruction: instruction
    #   field_input: input
    #   field_output: output
    #   format: |-
    #     <|im_start|>system
    #     {instruction}<|im_end|>
    #     <|im_start|>user
    #     {input}<|im_end|>
    #     <|im_start|>assistant
    #     {output}

    type: alpaca
    conversation: mpt-30b-instruct
    # field_system: instruction
    # field_instruction: input
    # field_input: input
    # field_output: output
chat_template: alpaca

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/out2
sequence_len: 2048
sample_packing: true
eval_sample_packing: true
pad_to_sequence_len: true

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 64
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.05
evals_per_epoch: 4
saves_per_epoch: 1
debug:
weight_decay: 0.0
special_tokens:
tinygemma.yaml
ADDED
@@ -0,0 +1,68 @@
base_model: SillyTilly/google_gemma-2-2b
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

# huggingface repo
datasets:
  - path: NewEden/Kalo-Opus-Instruct-25K-Refusal-killed
  - type: chatml
  - path: NewEden/Gryphe-3.5-16k-Subset
  - type: chatml

val_set_size: 0.0
output_dir: ./outputs/out

adapter:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear:

sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

wandb_project: TinyGemmy
wandb_entity:
wandb_watch:
wandb_name: TinyGemmy
wandb_log_model:


gradient_accumulation_steps: 32
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens: