BlindSolitaire committed
Commit • 044d480
1 Parent(s): eb4c34e
Added LORA
Browse files
- alpha-prompt_v1/README.md +202 -0
- alpha-prompt_v1/adapter_config.json +28 -0
- alpha-prompt_v1/adapter_model.bin +3 -0
- alpha-prompt_v1/added_tokens.json +6 -0
- alpha-prompt_v1/checkpoint-28576/README.md +219 -0
- alpha-prompt_v1/checkpoint-28576/adapter_config.json +28 -0
- alpha-prompt_v1/checkpoint-28576/adapter_model.bin +3 -0
- alpha-prompt_v1/checkpoint-28576/optimizer.pt +3 -0
- alpha-prompt_v1/checkpoint-28576/rng_state.pth +3 -0
- alpha-prompt_v1/checkpoint-28576/scheduler.pt +3 -0
- alpha-prompt_v1/checkpoint-28576/trainer_state.json +0 -0
- alpha-prompt_v1/checkpoint-28576/training_args.bin +3 -0
- alpha-prompt_v1/config.json +40 -0
- alpha-prompt_v1/runs/Oct16_18-49-32_babylon/events.out.tfevents.1697496580.babylon.1010856.0 +3 -0
- alpha-prompt_v1/runs/Oct16_18-56-39_babylon/events.out.tfevents.1697497007.babylon.1012350.0 +3 -0
- alpha-prompt_v1/runs/Oct16_19-01-12_babylon/events.out.tfevents.1697497280.babylon.1013466.0 +3 -0
- alpha-prompt_v1/runs/Oct16_19-04-08_babylon/events.out.tfevents.1697497455.babylon.1013704.0 +3 -0
- alpha-prompt_v1/runs/Oct16_19-24-00_babylon/events.out.tfevents.1697498648.babylon.1015156.0 +3 -0
- alpha-prompt_v1/special_tokens_map.json +6 -0
- alpha-prompt_v1/tokenizer.model +3 -0
- alpha-prompt_v1/tokenizer_config.json +54 -0
alpha-prompt_v1/README.md
ADDED
@@ -0,0 +1,202 @@
---
license: other
base_model: Gryphe/MythoMax-L2-13b
tags:
- generated_from_trainer
model-index:
- name: lora-out-10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)

# lora-out-10

This model is a fine-tuned version of [Gryphe/MythoMax-L2-13b](https://huggingface.co/Gryphe/MythoMax-L2-13b) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3963

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 3
- total_train_batch_size: 6
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 20
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.9065 | 0.01 | 200 | 1.9889 |
| 1.764 | 0.01 | 400 | 1.8015 |
| 1.6177 | 0.02 | 600 | 1.6777 |
| 1.5756 | 0.03 | 800 | 1.5862 |
| 1.4847 | 0.03 | 1000 | 1.5134 |
| 1.5603 | 0.04 | 1200 | 1.4480 |
| 1.347 | 0.05 | 1400 | 1.3939 |
| 1.2312 | 0.06 | 1600 | 1.3450 |
| 1.3749 | 0.06 | 1800 | 1.3037 |
| 1.2194 | 0.07 | 2000 | 1.2657 |
| 1.2434 | 0.08 | 2200 | 1.2297 |
| 1.2037 | 0.08 | 2400 | 1.1983 |
| 1.2709 | 0.09 | 2600 | 1.1679 |
| 1.1537 | 0.1 | 2800 | 1.1405 |
| 1.1408 | 0.1 | 3000 | 1.1154 |
| 1.0502 | 0.11 | 3200 | 1.0921 |
| 1.0453 | 0.12 | 3400 | 1.0717 |
| 0.9332 | 0.13 | 3600 | 1.0462 |
| 1.1569 | 0.13 | 3800 | 1.0284 |
| 1.146 | 0.14 | 4000 | 1.0092 |
| 1.0325 | 0.15 | 4200 | 0.9916 |
| 0.9936 | 0.15 | 4400 | 0.9748 |
| 0.9131 | 0.16 | 4600 | 0.9592 |
| 0.9426 | 0.17 | 4800 | 0.9416 |
| 0.8971 | 0.17 | 5000 | 0.9278 |
| 0.8258 | 0.18 | 5200 | 0.9124 |
| 0.8038 | 0.19 | 5400 | 0.8991 |
| 0.9663 | 0.2 | 5600 | 0.8841 |
| 0.7214 | 0.2 | 5800 | 0.8726 |
| 0.8789 | 0.21 | 6000 | 0.8621 |
| 0.8095 | 0.22 | 6200 | 0.8492 |
| 0.8446 | 0.22 | 6400 | 0.8379 |
| 0.8704 | 0.23 | 6600 | 0.8261 |
| 0.8175 | 0.24 | 6800 | 0.8165 |
| 0.7169 | 0.24 | 7000 | 0.8066 |
| 0.6899 | 0.25 | 7200 | 0.7952 |
| 0.8256 | 0.26 | 7400 | 0.7860 |
| 0.7186 | 0.27 | 7600 | 0.7759 |
| 0.8891 | 0.27 | 7800 | 0.7664 |
| 0.7919 | 0.28 | 8000 | 0.7580 |
| 0.8378 | 0.29 | 8200 | 0.7487 |
| 0.6804 | 0.29 | 8400 | 0.7390 |
| 0.6921 | 0.3 | 8600 | 0.7308 |
| 0.7263 | 0.31 | 8800 | 0.7221 |
| 0.6268 | 0.31 | 9000 | 0.7145 |
| 0.6954 | 0.32 | 9200 | 0.7067 |
| 0.8179 | 0.33 | 9400 | 0.6992 |
| 0.5709 | 0.34 | 9600 | 0.6929 |
| 0.7092 | 0.34 | 9800 | 0.6858 |
| 0.7396 | 0.35 | 10000 | 0.6774 |
| 0.5967 | 0.36 | 10200 | 0.6702 |
| 0.6984 | 0.36 | 10400 | 0.6636 |
| 0.5621 | 0.37 | 10600 | 0.6578 |
| 0.5342 | 0.38 | 10800 | 0.6512 |
| 0.6695 | 0.38 | 11000 | 0.6442 |
| 0.7242 | 0.39 | 11200 | 0.6374 |
| 0.6046 | 0.4 | 11400 | 0.6317 |
| 0.5994 | 0.41 | 11600 | 0.6260 |
| 0.6133 | 0.41 | 11800 | 0.6201 |
| 0.6707 | 0.42 | 12000 | 0.6138 |
| 0.6454 | 0.43 | 12200 | 0.6078 |
| 0.6534 | 0.43 | 12400 | 0.6013 |
| 0.6382 | 0.44 | 12600 | 0.5963 |
| 0.6659 | 0.45 | 12800 | 0.5908 |
| 0.6608 | 0.45 | 13000 | 0.5851 |
| 0.6864 | 0.46 | 13200 | 0.5793 |
| 0.6046 | 0.47 | 13400 | 0.5748 |
| 0.5331 | 0.48 | 13600 | 0.5692 |
| 0.5314 | 0.48 | 13800 | 0.5640 |
| 0.5528 | 0.49 | 14000 | 0.5594 |
| 0.5617 | 0.5 | 14200 | 0.5545 |
| 0.6572 | 0.5 | 14400 | 0.5488 |
| 0.6108 | 0.51 | 14600 | 0.5441 |
| 0.4776 | 0.52 | 14800 | 0.5387 |
| 0.493 | 0.52 | 15000 | 0.5357 |
| 0.5464 | 0.53 | 15200 | 0.5305 |
| 0.5519 | 0.54 | 15400 | 0.5260 |
| 0.4209 | 0.55 | 15600 | 0.5225 |
| 0.4759 | 0.55 | 15800 | 0.5173 |
| 0.5357 | 0.56 | 16000 | 0.5129 |
| 0.6064 | 0.57 | 16200 | 0.5091 |
| 0.4835 | 0.57 | 16400 | 0.5048 |
| 0.4951 | 0.58 | 16600 | 0.5017 |
| 0.3621 | 0.59 | 16800 | 0.4971 |
| 0.5166 | 0.59 | 17000 | 0.4935 |
| 0.5464 | 0.6 | 17200 | 0.4896 |
| 0.5093 | 0.61 | 17400 | 0.4858 |
| 0.443 | 0.62 | 17600 | 0.4828 |
| 0.4323 | 0.62 | 17800 | 0.4787 |
| 0.5066 | 0.63 | 18000 | 0.4754 |
| 0.4388 | 0.64 | 18200 | 0.4717 |
| 0.5436 | 0.64 | 18400 | 0.4682 |
| 0.3881 | 0.65 | 18600 | 0.4649 |
| 0.6051 | 0.66 | 18800 | 0.4623 |
| 0.5628 | 0.66 | 19000 | 0.4589 |
| 0.4372 | 0.67 | 19200 | 0.4560 |
| 0.4748 | 0.68 | 19400 | 0.4529 |
| 0.5461 | 0.69 | 19600 | 0.4499 |
| 0.4313 | 0.69 | 19800 | 0.4471 |
| 0.4353 | 0.7 | 20000 | 0.4445 |
| 0.4988 | 0.71 | 20200 | 0.4419 |
| 0.4037 | 0.71 | 20400 | 0.4394 |
| 0.446 | 0.72 | 20600 | 0.4368 |
| 0.4381 | 0.73 | 20800 | 0.4347 |
| 0.4849 | 0.73 | 21000 | 0.4324 |
| 0.4726 | 0.74 | 21200 | 0.4303 |
| 0.4842 | 0.75 | 21400 | 0.4279 |
| 0.3508 | 0.76 | 21600 | 0.4259 |
| 0.4452 | 0.76 | 21800 | 0.4236 |
| 0.3565 | 0.77 | 22000 | 0.4216 |
| 0.4634 | 0.78 | 22200 | 0.4196 |
| 0.3925 | 0.78 | 22400 | 0.4179 |
| 0.4086 | 0.79 | 22600 | 0.4164 |
| 0.4149 | 0.8 | 22800 | 0.4145 |
| 0.3856 | 0.8 | 23000 | 0.4128 |
| 0.4053 | 0.81 | 23200 | 0.4113 |
| 0.47 | 0.82 | 23400 | 0.4099 |
| 0.3918 | 0.83 | 23600 | 0.4086 |
| 0.4021 | 0.83 | 23800 | 0.4074 |
| 0.376 | 0.84 | 24000 | 0.4063 |
| 0.5067 | 0.85 | 24200 | 0.4052 |
| 0.4721 | 0.85 | 24400 | 0.4041 |
| 0.4015 | 0.86 | 24600 | 0.4031 |
| 0.3576 | 0.87 | 24800 | 0.4021 |
| 0.3975 | 0.87 | 25000 | 0.4015 |
| 0.3898 | 0.88 | 25200 | 0.4006 |
| 0.4235 | 0.89 | 25400 | 0.4000 |
| 0.3808 | 0.9 | 25600 | 0.3992 |
| 0.3811 | 0.9 | 25800 | 0.3989 |
| 0.4011 | 0.91 | 26000 | 0.3983 |
| 0.4219 | 0.92 | 26200 | 0.3981 |
| 0.3764 | 0.92 | 26400 | 0.3977 |
| 0.4046 | 0.93 | 26600 | 0.3974 |
| 0.4342 | 0.94 | 26800 | 0.3972 |
| 0.4209 | 0.94 | 27000 | 0.3969 |
| 0.4549 | 0.95 | 27200 | 0.3967 |
| 0.3316 | 0.96 | 27400 | 0.3967 |
| 0.4648 | 0.97 | 27600 | 0.3967 |
| 0.4657 | 0.97 | 27800 | 0.3965 |
| 0.3959 | 0.98 | 28000 | 0.3964 |
| 0.3666 | 0.99 | 28200 | 0.3964 |
| 0.3973 | 0.99 | 28400 | 0.3963 |

### Framework versions

- Transformers 4.35.0.dev0
- Pytorch 2.1.0+cu118
- Datasets 2.14.5
- Tokenizers 0.14.1
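A usage note on the card above (not part of the commit): a minimal sketch of attaching this LoRA adapter to the base model with `transformers` and `peft`, assuming the adapter directory from this commit is available locally as `alpha-prompt_v1`. The prompt is illustrative, and 8-bit loading mirrors the quantization config recorded later in this commit.

```python
# Minimal sketch: load MythoMax-L2-13b in 8-bit and attach the LoRA adapter.
# Paths and prompt are illustrative, not from the commit itself.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Gryphe/MythoMax-L2-13b",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # matches config.json below
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Gryphe/MythoMax-L2-13b")
model = PeftModel.from_pretrained(base, "alpha-prompt_v1")  # adapter dir from this commit

inputs = tokenizer("Describe a quiet harbor at dusk.", return_tensors="pt").to(base.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```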
alpha-prompt_v1/adapter_config.json
ADDED
@@ -0,0 +1,28 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Gryphe/MythoMax-L2-13b",
  "bias": "none",
  "fan_in_fan_out": null,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "down_proj",
    "k_proj",
    "up_proj",
    "o_proj",
    "v_proj",
    "q_proj",
    "gate_proj"
  ],
  "task_type": "CAUSAL_LM"
}
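For reference, a sketch of the same adapter settings expressed as a `peft.LoraConfig`; the field values mirror the JSON above, but this is a reconstruction, not the original training script.

```python
# Sketch: the LoraConfig equivalent of the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                   # "r"
    lora_alpha=16,          # "lora_alpha"
    lora_dropout=0.05,      # "lora_dropout"
    bias="none",            # "bias"
    task_type="CAUSAL_LM",  # "task_type"
    target_modules=[        # all seven Llama projection modules
        "down_proj", "k_proj", "up_proj", "o_proj",
        "v_proj", "q_proj", "gate_proj",
    ],
)
```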
alpha-prompt_v1/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c64287e70391f53c1ac015be3cebcdb343deaaf7d02ff984664b28245ad91b36
size 500897546
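The three lines above are a Git LFS pointer, not the weights themselves. A small sketch, assuming the real file has been pulled, that checks a local copy against the recorded sha256; the local path is an assumption.

```python
# Sketch: verify a pulled adapter_model.bin against the LFS pointer's sha256.
import hashlib

EXPECTED = "c64287e70391f53c1ac015be3cebcdb343deaaf7d02ff984664b28245ad91b36"

h = hashlib.sha256()
with open("alpha-prompt_v1/adapter_model.bin", "rb") as f:  # local path is an assumption
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED, "checksum mismatch"
```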
alpha-prompt_v1/added_tokens.json
ADDED
@@ -0,0 +1,6 @@
{
  "</s>": 2,
  "<pad>": 32000,
  "<s>": 1,
  "<unk>": 0
}
alpha-prompt_v1/checkpoint-28576/README.md
ADDED
@@ -0,0 +1,219 @@
---
library_name: peft
base_model: Gryphe/MythoMax-L2-13b
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Data Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

## Training procedure

The following `bitsandbytes` quantization config was used during training:
- quant_method: bitsandbytes
- load_in_8bit: True
- load_in_4bit: False
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: fp4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float32

### Framework versions

- PEFT 0.6.0.dev0
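The quantization bullets in the checkpoint card map directly onto a `transformers.BitsAndBytesConfig`; a sketch of that mapping (a reconstruction for readability, not the original training code):

```python
# Sketch: BitsAndBytesConfig implied by the quantization settings above.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",              # unused while load_in_8bit is set
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```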
alpha-prompt_v1/checkpoint-28576/adapter_config.json
ADDED
@@ -0,0 +1,28 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Gryphe/MythoMax-L2-13b",
  "bias": "none",
  "fan_in_fan_out": null,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "down_proj",
    "k_proj",
    "up_proj",
    "o_proj",
    "v_proj",
    "q_proj",
    "gate_proj"
  ],
  "task_type": "CAUSAL_LM"
}
alpha-prompt_v1/checkpoint-28576/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c64287e70391f53c1ac015be3cebcdb343deaaf7d02ff984664b28245ad91b36
size 500897546
alpha-prompt_v1/checkpoint-28576/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b27be11db4e3ac13ee4af3e9dd02643f47dc0bcf19ca2f03b23239628de206e7
size 251485652
alpha-prompt_v1/checkpoint-28576/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15941810e1b0adf6d3aebb53ef32e5ebb64a0ad2703b71e3c29b6b957ced9cc4
size 14244
alpha-prompt_v1/checkpoint-28576/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:708a30ddbd1285185431577f79954f5206248c87a11265e9c6b2f9952241f3c6
size 1064
alpha-prompt_v1/checkpoint-28576/trainer_state.json
ADDED
The diff for this file is too large to render.
See raw diff
alpha-prompt_v1/checkpoint-28576/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:496f9a53e22cbc63d9098f12ae40f8ad01b500e86cb27ab06b05e6ad8ed461d8
size 4920
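training_args.bin stores a serialized `transformers.TrainingArguments`; a sketch of arguments consistent with the hyperparameters reported in the top-level README (2 per-device batch × 3 accumulation steps = total train batch size 6). The `output_dir` value is illustrative, taken from the card's model name.

```python
# Sketch: TrainingArguments consistent with the README hyperparameters.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="lora-out-10",        # illustrative
    learning_rate=2e-4,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=3,   # 2 x 3 = total train batch size 6
    lr_scheduler_type="cosine",
    warmup_steps=20,
    num_train_epochs=1,
    seed=42,
)
```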
alpha-prompt_v1/config.json
ADDED
@@ -0,0 +1,40 @@
{
  "_name_or_path": "Gryphe/MythoMax-L2-13b",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 13824,
  "max_position_embeddings": 4096,
  "model_type": "llama",
  "num_attention_heads": 40,
  "num_hidden_layers": 40,
  "num_key_value_heads": 40,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "quantization_config": {
    "bnb_4bit_compute_dtype": "float32",
    "bnb_4bit_quant_type": "fp4",
    "bnb_4bit_use_double_quant": false,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": false,
    "load_in_8bit": true,
    "quant_method": "bitsandbytes"
  },
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.35.0.dev0",
  "use_cache": false,
  "vocab_size": 32001
}
alpha-prompt_v1/runs/Oct16_18-49-32_babylon/events.out.tfevents.1697496580.babylon.1010856.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67809baa236b5a3633958b6877e7dba4fc9ded5a3c03085aedb8cb7788c2e958
size 4958
alpha-prompt_v1/runs/Oct16_18-56-39_babylon/events.out.tfevents.1697497007.babylon.1012350.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e07dc4ec6e69f7e543e4e22fd2adbb22575ab7d4a4d07d218161504731471814
size 4958
alpha-prompt_v1/runs/Oct16_19-01-12_babylon/events.out.tfevents.1697497280.babylon.1013466.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e0ec436ebddb55d6f957749ec859417efda37b3d9479d5108909609de268dfa
size 4958
alpha-prompt_v1/runs/Oct16_19-04-08_babylon/events.out.tfevents.1697497455.babylon.1013704.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8269e91289c771b8c63c9f6ce4b873cf161a1a427cd5a32b752d05499c056abc
size 8037
alpha-prompt_v1/runs/Oct16_19-24-00_babylon/events.out.tfevents.1697498648.babylon.1015156.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:38d03a1c22d90429672bfa163dc85f7fd04912a6a972264bdbc1958d47627e5f
size 4566735
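The five files above are TensorBoard event logs from five launches on the host `babylon`. A sketch for reading their scalar curves offline; the directory path mirrors this commit's layout, and the `train/loss` tag name is an assumption (actual tag names depend on the trainer).

```python
# Sketch: read scalar curves from one of the TensorBoard event logs above.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("alpha-prompt_v1/runs/Oct16_19-24-00_babylon")
acc.Reload()                             # parse the event file from disk
print(acc.Tags()["scalars"])             # list the available scalar tags
for event in acc.Scalars("train/loss"):  # tag name is an assumption
    print(event.step, event.value)
```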
alpha-prompt_v1/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "<unk>",
  "unk_token": "<unk>"
}
alpha-prompt_v1/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
alpha-prompt_v1/tokenizer_config.json
ADDED
@@ -0,0 +1,54 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32000": {
      "content": "<pad>",
      "lstrip": true,
      "normalized": true,
      "rstrip": true,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 4096,
  "pad_token": "<unk>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "tokenizer_file": null,
  "trust_remote_code": false,
  "unk_token": "<unk>",
  "use_default_system_prompt": true,
  "use_fast": true
}
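Taken together, the tokenizer files above define a standard Llama tokenizer with one extra `<pad>` token (hence `vocab_size: 32001` in config.json). A quick sketch that loads it and checks the additions; the local directory name is an assumption.

```python
# Sketch: load the tokenizer from this commit's directory and inspect it.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("alpha-prompt_v1")
print(tok.convert_tokens_to_ids("<pad>"))  # 32000, per added_tokens.json
print(tok.pad_token)                       # "<unk>", per special_tokens_map.json
print(tok.model_max_length)                # 4096, per tokenizer_config.json
```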