aaditya committed
Commit aff4b40
1 Parent(s): 2f12bb8

Create resume_model.yml

Files changed (1)
  1. resume_model.yml +70 -0
resume_model.yml ADDED
@@ -0,0 +1,70 @@
+ base_model: microsoft/phi-1_5
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+
+ datasets:
+   - path: aaditya/alpaca_subset_1
+     type: alpaca
+ val_set_size: 0
+ output_dir: .
+
+ sequence_len: 2048
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ adapter: qlora
+ lora_model_dir:
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 2
+ micro_batch_size: 20
+ num_epochs: 1
+ optimizer: adamw_torch
+ adam_beta2: 0.95
+ adam_epsilon: 0.00001
+ max_grad_norm: 1.0
+ lr_scheduler: cosine
+ learning_rate: 0.000003
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: true
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: True
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 100
+ evals_per_epoch: 4
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ fsdp:
+ fsdp_config:
+ resize_token_embeddings_to_32x: true
+ special_tokens:
+   pad_token: "<|endoftext|>"
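
The key names here (adapter: qlora, sample_packing, lora_target_linear, micro_batch_size, evals_per_epoch) match the axolotl config schema, so this file is presumably consumed by the axolotl trainer. Assuming that, a config like this is typically launched with axolotl's documented entry point:

    accelerate launch -m axolotl.cli.train resume_model.yml

This trains a 4-bit QLoRA adapter on top of microsoft/phi-1_5 over the aaditya/alpaca_subset_1 dataset for one epoch; adapter checkpoints land in output_dir.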