s3nh committed
Commit 39b5678
1 Parent(s): 70d5cee

Create README.md

Files changed (1): README.md +94 -0

README.md ADDED
 
---
license: openrail
language:
- pl
- en
library_name: transformers
pipeline_tag: text-generation
---

microsoft/phi-1_5 fine-tuned on s3nh/dolly_instruction_polish.

Fine-tuned with QLoRA; the version provided here is the adapter already merged with the base model.
The model was loaded in 4-bit, with the sequence length set to 1024.
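
For quick testing, the merged model can be loaded roughly as in the sketch below. This is a hedged example, not taken from the original card: the repository id is a placeholder, the alpaca-style prompt is an assumption based on the instruction dataset format, and bitsandbytes (for 4-bit loading) plus a GPU are assumed to be available.

```python
# Hypothetical usage sketch: load the merged model in 4-bit with transformers
# and bitsandbytes, then generate a completion.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Placeholder: replace with this repository's actual model id.
model_id = "s3nh/REPLACE_WITH_THIS_REPO_ID"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# Alpaca-style prompt matching the instruction dataset format (an assumption,
# not something the card specifies). The Polish instruction means
# "Briefly describe what photosynthesis is."
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nOpisz krótko, czym jest fotosynteza.\n\n"
    "### Response:\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```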
Axolotl config:
```yaml
base_model: microsoft/phi-2
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
is_llama_derived_model: false
trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: s3nh/alpaca-dolly-instruction-only-polish
    type: alpaca

dataset_prepared_path:
val_set_size: 0.05
output_dir: ./phi-2-sft-out

sequence_len: 1024
sample_packing: false  # not CURRENTLY compatible with LoRAs
pad_to_sequence_len:

adapter: qlora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_torch
adam_beta2: 0.95
adam_epsilon: 0.00001
max_grad_norm: 1.0
lr_scheduler: cosine
learning_rate: 0.000003

train_on_inputs: false
group_by_length: true
bf16: true
fp16: false
tf32: true

gradient_checkpointing:
early_stopping_patience:
resume_from_checkpoint: false
local_rank:
logging_steps: 100
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
saves_per_epoch:
save_strategy: steps
save_steps: 5000
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
resize_token_embeddings_to_32x: true
special_tokens:
  bos_token: "<|endoftext|>"
  eos_token: "<|endoftext|>"
  unk_token: "<|endoftext|>"
  pad_token: "<|endoftext|>"
```
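
With axolotl installed, a config like the one above is typically launched with `accelerate launch -m axolotl.cli.train config.yml`; the filename here is a placeholder and the exact CLI invocation depends on the axolotl version.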