Gustav0-Freind committed on
Commit 8ccc65d
1 Parent(s): 5cde7af

Upload config.yml

Files changed (1)
config.yml +73 -0
config.yml ADDED
@@ -0,0 +1,73 @@
+ base_model: /mnt/text-generation-webui/models/yarn-llama2-gptq/
+ is_llama_derived_model: false
+ gptq: true
+ gptq_disable_exllama: true
+ model_type: AutoModelForCausalLM
+ tokenizer_type: LlamaTokenizer
+ tokenizer_use_fast: true
+ tokenizer_legacy: true
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+ push_dataset_to_hub:
+ hf_use_auth_token: true
+ datasets:
+   - path: Steelskull/OpenHermes-2.5-Alpaca
+     type: alpaca
+ dataset_prepared_path:
+ val_set_size: 0.05
+ adapter: lora
+ lora_model_dir:
+ sequence_len: 4096
+ sample_packing:
+ lora_r: 8
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_modules:
+   - k_proj
+   - o_proj
+   - q_proj
+   - v_proj
+ lora_target_linear:
+ lora_fan_in_fan_out:
+ wandb_project:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+ output_dir: ./model-out
+ gradient_accumulation_steps: 1
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_torch
+ adam_beta2: 0.95
+ adam_eps: 0.00001
+ max_grad_norm: 1.0
+ torchdistx_path:
+ lr_scheduler: cosine
+ lr_quadratic_warmup: true
+ learning_rate: 0.000017
+ train_on_inputs: false
+ group_by_length: false
+ bf16: false
+ fp16: false
+ float16: true
+ tf32: true
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention:
+ sdp_attention:
+ flash_optimum:
+ warmup_steps: 100
+ evals_per_epoch: 4
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ special_tokens:
+   bos_token: "<s>"
+   eos_token: "</s>"
+   unk_token: "<unk>"
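
These keys follow Axolotl's training-config schema (base_model, datasets with type: alpaca, adapter: lora, the gptq flags). Assuming Axolotl is the trainer consuming this file, a minimal sketch of launching a run with it would be:

    accelerate launch -m axolotl.cli.train config.yml

Here gptq: true together with gptq_disable_exllama: true loads the quantized base weights without the exllama kernels, which are inference-only and do not support backpropagation, while the lora_* keys attach a rank-8 adapter to the q/k/v/o attention projections listed under lora_target_modules.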