kiendt committed · Commit 77e102a · verified · 1 Parent(s): 5d30ad4
README.md CHANGED
@@ -12,6 +12,7 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/trungkien-mv123/huggingface/runs/rg79iea6)
 # experiments
 
 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the None dataset.
@@ -42,7 +43,7 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.05
-- num_epochs: 0.2
+- num_epochs: 1
 - mixed_precision_training: Native AMP
 
 ### Training results
@@ -53,6 +54,6 @@ The following hyperparameters were used during training:
 
 - PEFT 0.10.1.dev0
 - Transformers 4.41.0.dev0
-- Pytorch 2.2.1+cu121
+- Pytorch 2.1.2
 - Datasets 2.19.0
 - Tokenizers 0.19.1
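The hyperparameter lines above map directly onto Hugging Face `TrainingArguments`. A minimal sketch of that mapping follows; only the values shown in the card are grounded, while the output directory and the commented-out `Trainer` wiring are illustrative assumptions.

```python
# Hypothetical reconstruction of the training setup described by the card.
# Only the hyperparameter values come from the README diff above; the
# output_dir and the Trainer wiring are placeholder assumptions.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="experiments",    # assumed from the card's "# experiments" title
    num_train_epochs=1,          # num_epochs: updated from 0.2 to 1 in this commit
    lr_scheduler_type="cosine",  # lr_scheduler_type: cosine
    warmup_ratio=0.05,           # lr_scheduler_warmup_ratio: 0.05
    adam_beta1=0.9,              # optimizer: Adam with betas=(0.9,0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,           # epsilon=1e-08
    fp16=True,                   # mixed_precision_training: Native AMP
)

# trainer = Trainer(model=model, args=args, train_dataset=train_ds)  # assumed wiring
# trainer.train()
```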
adapter_config.json CHANGED
@@ -20,12 +20,12 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "k_proj",
     "gate_proj",
     "o_proj",
-    "k_proj",
     "v_proj",
-    "down_proj",
     "up_proj",
+    "down_proj",
     "q_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b3192c2a90728f057bdd4d24a2b9095a208fa061eb840d30a9253ab4ffc74f78
+oid sha256:69cd9ae54febd4c4dded941000153e63a7b0a97d44f07aa79a3c8da0e19ecba5
 size 167832240
runs/Apr22_05-08-34_5d51fb9489cf/events.out.tfevents.1713762528.5d51fb9489cf.34.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:881d9a9cfd6e1ed20021e86eecd8dbe1c8fa7ba11f9ded71b9c4a8563e237897
+size 488901
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75facd3c85db8af1f3af634e43c2329e74ee9336e0ab038f2d5398ff521c4b42
+oid sha256:b42be8612e70a48c1f58bfb9b310c35c84d939ebeec5f63df4a01a97ce51accb
 size 4984
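The binary files in this commit are stored as Git LFS pointers: three-line text stubs carrying the spec version, the SHA-256 of the real payload, and its size in bytes, exactly as shown in the hunks above. A small sketch of verifying a downloaded blob against such a pointer; the paths and helper names are made up for this example.

```python
# Verify a downloaded artifact against its Git LFS pointer.
# The pointer format (version/oid/size) matches the diffs above;
# file paths here are illustrative.
import hashlib
from pathlib import Path

def parse_pointer(pointer_path: str) -> dict:
    """Parse the key-value lines of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify(pointer_path: str, blob_path: str) -> bool:
    """Check that blob_path matches the oid and size recorded in the pointer."""
    fields = parse_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    data = Path(blob_path).read_bytes()
    return (
        hashlib.sha256(data).hexdigest() == expected_oid
        and len(data) == int(fields["size"])
    )

# e.g. verify("training_args.bin.pointer", "training_args.bin")  # True if intact
```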