byroneverson committed
Commit ac008be
Parent: 91d82bf

Model save

Files changed (1): README.md (+155, -0)
---
license: apache-2.0
library_name: peft
tags:
- generated_from_trainer
base_model: byroneverson/LLaVA-v1.5-7B-rehome
model-index:
- name: LLaVA-v1.5-7B-rehome-shell-qlora
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.3.0`
```yaml
base_model: byroneverson/LLaVA-v1.5-7B-rehome
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
#is_mistral_derived_model: true
#
load_in_8bit: false
load_in_4bit: true
strict: false
#
datasets:
  - path: byroneverson/shell-cmd-instruct
    type: solar_shell_instruct #alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./qlora-out
#
# Push checkpoints to hub
hub_model_id: byroneverson/LLaVA-v1.5-7B-rehome-shell-qlora
# How to push checkpoints to hub
# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
hub_strategy: checkpoint
# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
# Required to be true when used in combination with `push_dataset_to_hub`
hf_use_auth_token: true
#
adapter: qlora
lora_model_dir:
#
sequence_len: 2048
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
#
lora_r: 128
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
target_modules: [
  "up_proj",
  "down_proj",
  "gate_proj",
]
#
wandb_project: "LLaVA-v1.5-7B-rehome-qlora"
wandb_log_model: "checkpoint"
wandb_entity:
wandb_watch:
wandb_run_id:
#
gradient_accumulation_steps: 2 # 1
micro_batch_size: 1
num_epochs: 3
optimizer: paged_lion_8bit #paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 0.0002
#
train_on_inputs: false
group_by_length: false
bf16: false #true
fp16: true
tf32: false
#
gradient_checkpointing: true
early_stopping_patience:
# Resume from a specific checkpoint dir
resume_from_checkpoint: #last-checkpoint
# If resume_from_checkpoint isn't set and you simply want it to start where it left off.
# Be careful with this being turned on between different models.
auto_resume_from_checkpoints: false
local_rank:
logging_steps: 1
xformers_attention:
# Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
flash_attention: false #true
flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only
flash_attn_rms_norm: false # Whether to use flash-attention rms norm implementation - advanced use only
flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation
flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation
#
warmup_steps: 10
eval_steps: 0.05
eval_table_size:
eval_table_max_new_tokens: 128
save_steps:
debug: true #
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"

```

</details><br>
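
For orientation, the QLoRA-specific parts of the config above (4-bit base weights plus a LoRA adapter) translate roughly into the following `transformers` + `peft` setup. This is a hedged sketch, not the exact code Axolotl runs: the NF4 quant type and fp16 compute dtype are assumptions inferred from the `load_in_4bit` and `fp16` flags.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

# load_in_4bit: true -> quantize the frozen base weights to 4-bit (QLoRA)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # assumed; common QLoRA default
    bnb_4bit_compute_dtype=torch.float16,  # fp16: true in the config
)

base = AutoModelForCausalLM.from_pretrained(
    "byroneverson/LLaVA-v1.5-7B-rehome",
    quantization_config=bnb_config,
    device_map="auto",
)

# lora_r: 128, lora_alpha: 16, lora_dropout: 0.05, MLP projections as targets
lora_config = LoraConfig(
    r=128,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["up_proj", "down_proj", "gate_proj"],
    task_type="CAUSAL_LM",
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA weights are trainable
```

Note that the config also sets `lora_target_linear: true`, which in Axolotl targets all linear layers; the sketch only mirrors the explicit `target_modules` list.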

# LLaVA-v1.5-7B-rehome-shell-qlora

This model is a QLoRA fine-tuned version of [byroneverson/LLaVA-v1.5-7B-rehome](https://huggingface.co/byroneverson/LLaVA-v1.5-7B-rehome) on the [byroneverson/shell-cmd-instruct](https://huggingface.co/datasets/byroneverson/shell-cmd-instruct) dataset.
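
As a minimal, hedged sketch (not from the original card), the adapter can be loaded for inference with `peft` roughly like this; the prompt format expected by the `solar_shell_instruct` dataset type is not documented here, so the plain prompt is purely illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "byroneverson/LLaVA-v1.5-7B-rehome"
adapter_id = "byroneverson/LLaVA-v1.5-7B-rehome-shell-qlora"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA weights

prompt = "List all files in the current directory sorted by size."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```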

## Model description

A QLoRA adapter (PEFT) trained with Axolotl on top of the base model. See the config above for the full LoRA and quantization settings (`lora_r: 128`, `lora_alpha: 16`, `lora_dropout: 0.05`, 4-bit base weights, MLP projections listed in `target_modules`).

## Intended uses & limitations

More information needed

## Training and evaluation data

The adapter was trained on [byroneverson/shell-cmd-instruct](https://huggingface.co/datasets/byroneverson/shell-cmd-instruct) (dataset type `solar_shell_instruct` in the Axolotl config), with 5% of the data held out for evaluation (`val_set_size: 0.05`).
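
As a quick, assumption-laden way to inspect the data (the 5% split below only mirrors `val_set_size: 0.05` and the reported `seed: 42`; Axolotl's own split logic may differ):

```python
from datasets import load_dataset

# Training data referenced in the Axolotl config above
ds = load_dataset("byroneverson/shell-cmd-instruct", split="train")

# Roughly reproduce a 95/5 train/validation split
splits = ds.train_test_split(test_size=0.05, seed=42)
print(splits["train"].num_rows, "train rows /", splits["test"].num_rows, "eval rows")
print(splits["train"][0])  # inspect one raw example
```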

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 3
- mixed_precision_training: Native AMP

### Framework versions

- PEFT 0.7.2.dev0
- Transformers 4.37.0.dev0
- Pytorch 2.0.1+cu117
- Datasets 2.16.1
- Tokenizers 0.15.0