dwb2023 committed
Commit 0d3a00d
Parent: aa0ee8d

ai-maker-space/llama38binstruct-summary-jun14-b4-merge
README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [NousResearch/Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.9957
+- Loss: 1.4391
 
 ## Model description
 
@@ -50,18 +50,18 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:----:|:---------------:|
-| 1.4216        | 1.25  | 25   | 1.4108          |
-| 0.5039        | 2.5   | 50   | 1.6955          |
-| 0.183         | 3.75  | 75   | 1.8566          |
-| 0.1127        | 5.0   | 100  | 1.9957          |
+| Training Loss | Epoch  | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 1.4009        | 1.3889 | 25   | 1.0738          |
+| 0.4554        | 2.7778 | 50   | 1.2629          |
+| 0.2384        | 4.1667 | 75   | 1.3364          |
+| 0.0555        | 5.5556 | 100  | 1.4391          |
 
 
 ### Framework versions
 
 - PEFT 0.11.1
 - Transformers 4.41.2
-- Pytorch 2.3.0+cu121
+- Pytorch 2.3.1+cu121
 - Datasets 2.20.0
 - Tokenizers 0.19.1
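For reference, a minimal sketch of loading the adapter committed here on top of its base model with the framework versions listed above. The base and adapter repository ids come from the card and this commit page; the dtype, device placement, and generation settings are illustrative assumptions, not values recorded in the commit.

```python
# Sketch only: standard PEFT/Transformers loading pattern, not taken from this repo's code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NousResearch/Meta-Llama-3-8B-Instruct"
adapter_id = "ai-maker-space/llama38binstruct-summary-jun14-b4-merge"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.bfloat16,   # assumed dtype; not recorded in the commit
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights

prompt = "Summarize the following document:\n..."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

`PeftModel.from_pretrained` reads the LoRA weights from `adapter_model.safetensors` and leaves the base checkpoint untouched.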
adapter_config.json CHANGED
@@ -20,12 +20,12 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
-    "v_proj",
-    "q_proj",
+    "k_proj",
     "down_proj",
+    "q_proj",
     "gate_proj",
-    "k_proj",
+    "o_proj",
+    "v_proj",
     "up_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
-size 48
+oid sha256:327a960fa71119ffe1ff78f6b2017652be2348c0921ced6558f431f27f099cd3
+size 167832240
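The Git LFS pointer above shows the adapter weights replacing a 48-byte placeholder object with one of about 168 MB; the oid is the SHA-256 of the stored file. A small sketch of checking a local copy against the pointer's oid and size (the file path assumes a local checkout with LFS files pulled):

```python
# Sketch only: verify a downloaded LFS object against the pointer recorded in this commit.
import hashlib
from pathlib import Path

# Values copied from the pointer file above.
expected_oid = "327a960fa71119ffe1ff78f6b2017652be2348c0921ced6558f431f27f099cd3"
expected_size = 167832240

path = Path("adapter_model.safetensors")  # assumed local path
data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("local file matches the pointer's oid and size")
```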
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ddeed7fc25a7b2d97345ae61c62c8ef1296bab1c3c6a1c366bd84db2d6c05323
-size 5368
+oid sha256:5a10f393c1617151db0aa89533642c35b66d61da5abea9a055150180aa528bf1
+size 5432