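# Axolotl QLoRA fine-tuning config: Mixtral-8x22B base, ChatML-formatted ShareGPT data, output to Goku-8x22B-v0.1.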
base_model: v2ray/Mixtral-8x22B-v0.1
model_type: AutoModelForCausalLM
tokenizer_type: LlamaTokenizer
trust_remote_code: true

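# Quantize the base model to 4-bit so the QLoRA adapter (adapter: qlora below) trains on top of frozen quantized weights.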
load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: philschmid/guanaco-sharegpt-style
    type: sharegpt
    prompt_style: chatml

dataset_prepared_path: last_run_prepared
val_set_size: 0
output_dir: ./models/Goku-8x22B-v0.1

# Optionally freeze the entire model and unfreeze only the parameters whose names match these regex patterns.
unfrozen_parameters:
#  - ^lm_head.weight$
#  - ^model.embed_tokens.weight$[:32000]
#  - model.layers.2[0-9]+.block_sparse_moe.gate
#  - model.layers.2[0-9]+.block_sparse_moe.experts
#  - model.layers.3[0-9]+.block_sparse_moe.gate
#  - model.layers.3[0-9]+.block_sparse_moe.experts

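# Emit router logits so the MoE auxiliary load-balancing loss is added to the training loss.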
model_config:
  output_router_logits: true

sequence_len: 2048
sample_packing: false
pad_to_sequence_len: true

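# QLoRA adapter: rank-16 / alpha-8 adapters on every linear layer (lora_target_linear) with 5% dropout.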
adapter: qlora
lora_model_dir:

lora_r: 16
lora_alpha: 8
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:

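# Effective batch size per optimizer step: micro_batch_size (6) x gradient_accumulation_steps (4) = 24 sequences per GPU.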
gradient_accumulation_steps: 4
micro_batch_size: 6
num_epochs: 1

optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs:
group_by_length: false
bf16: auto
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
weight_decay: 0.0

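# ChatML control tokens: <|im_end|> serves as EOS and <|im_start|> is added to the tokenizer vocabulary.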
special_tokens:
  eos_token: "<|im_end|>"
tokens:
  - "<|im_start|>"