Commit 89e91a6
1 Parent(s): 8f606dd
paul-stansifer/llama3-qwantz-gen
README.md CHANGED
@@ -1,9 +1,9 @@
 ---
-
+base_model: unsloth/llama-3-8b-bnb-4bit
 library_name: peft
+license: llama2
 tags:
 - generated_from_trainer
-base_model: unsloth/llama-3-8b-bnb-4bit
 model-index:
 - name: llama3-qwantz-gen
   results: []
@@ -16,7 +16,12 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [unsloth/llama-3-8b-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-bnb-4bit) on an unknown dataset.
 It achieves the following results on the evaluation set:
-
+- eval_loss: 1.4032
+- eval_runtime: 305.3925
+- eval_samples_per_second: 0.508
+- eval_steps_per_second: 0.128
+- epoch: 0.8052
+- step: 248
 
 ## Model description
 
@@ -41,22 +46,12 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
-
-### Training results
-
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:------:|:----:|:---------------:|
-| 1.5072        | 0.2013 | 62   | 1.4683          |
-| 1.3859        | 0.4026 | 124  | 1.4277          |
-| 1.3615        | 0.6039 | 186  | 1.4143          |
-| 1.3761        | 0.8052 | 248  | 1.4044          |
-
+- num_epochs: 2
 
 ### Framework versions
 
 - PEFT 0.11.1
 - Transformers 4.41.2
 - Pytorch 2.3.0+cu121
-- Datasets 2.
+- Datasets 2.20.0
 - Tokenizers 0.19.1
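The README above documents a PEFT (LoRA) adapter rather than a full model. A minimal loading sketch, assuming `peft`, `transformers`, `accelerate`, and `bitsandbytes` are installed and that the adapter resolves at the Hub id `paul-stansifer/llama3-qwantz-gen` (the repo shown in this commit); the prompt is a placeholder, not taken from the training data:

```python
# Minimal sketch, not from this repo: load the LoRA adapter on top of the
# 4-bit base model referenced in adapter_config.json.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

repo_id = "paul-stansifer/llama3-qwantz-gen"  # repo shown in this commit

# Reads adapter_config.json, downloads the base model
# (unsloth/llama-3-8b-bnb-4bit), and attaches the adapter weights.
model = AutoPeftModelForCausalLM.from_pretrained(repo_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-3-8b-bnb-4bit")

inputs = tokenizer("T-REX:", return_tensors="pt").to(model.device)  # placeholder prompt
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```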
adapter_config.json CHANGED
@@ -20,10 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "
+    "k_proj",
     "v_proj",
-    "
-    "
+    "o_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
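For context, the new `target_modules` list applies LoRA to all four attention projections of the Llama architecture. A hedged sketch of a `LoraConfig` that would serialize to a `target_modules` block like the one above; `r`, `lora_alpha`, and `lora_dropout` are illustrative placeholders, not values recorded in this commit:

```python
# Illustrative sketch: only target_modules, task_type, and use_dora mirror
# the adapter_config.json diff; the numeric values are placeholders.
from peft import LoraConfig

lora_config = LoraConfig(
    target_modules=["k_proj", "v_proj", "o_proj", "q_proj"],  # attention projections
    task_type="CAUSAL_LM",
    use_dora=False,       # matches "use_dora": false above
    r=16,                 # placeholder rank
    lora_alpha=16,        # placeholder scaling factor
    lora_dropout=0.0,     # placeholder dropout
)
```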
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bdc66dcd776a7511e925982f8d1b6c4d8b6fcc150e159f9ecf22563acf5eb373
 size 54560368
runs/Jul12_15-29-47_0aa09e1ec8f8/events.out.tfevents.1720798188.0aa09e1ec8f8.494.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:599e483f59fac80f13bf3bd48cf176c21b59d50e72ab7e5a26e154e9986cfe6d
+size 10125
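The added `runs/...` file is a TensorBoard event log written during training. A generic sketch for inspecting its scalars once downloaded, assuming the `tensorboard` package is installed; the tag names are whatever the trainer actually logged, not values taken from this commit:

```python
# Generic sketch: list and dump scalar series from the event log's directory.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jul12_15-29-47_0aa09e1ec8f8")  # directory from the diff
ea.Reload()
for tag in ea.Tags()["scalars"]:
    for event in ea.Scalars(tag):
        print(tag, event.step, event.value)
```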
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:44abc76211d6bcfcad6bdd9fdad8a28bfe03bf64381d4253c4f7cbcb551f6344
 size 5112
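The `.safetensors` and `.bin` entries in this commit are Git LFS pointer files: three lines giving the spec version, the blob's sha256 object id, and its size in bytes. A generic sanity-check sketch (not part of this repo) for verifying a downloaded blob against the pointer's oid:

```python
# Generic sketch: recompute a file's sha256 and compare with the LFS pointer oid.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the training_args.bin pointer above
expected = "44abc76211d6bcfcad6bdd9fdad8a28bfe03bf64381d4253c4f7cbcb551f6344"
print(sha256_of("training_args.bin") == expected)
```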