RayBernard committed
Commit: d49a84d
1 Parent(s): 7e7a009

Model save

README.md CHANGED
@@ -6,8 +6,6 @@ tags:
 - sft
 - generated_from_trainer
 base_model: meta-llama/Meta-Llama-3-8B-Instruct
-datasets:
-- generator
 model-index:
 - name: llama-3-8B-Instruct-ft
   results: []
@@ -18,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama-3-8B-Instruct-ft
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the generator dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on an unknown dataset.
 
 ## Model description
 
@@ -49,10 +47,6 @@ The following hyperparameters were used during training:
 - training_steps: 50
 - mixed_precision_training: Native AMP
 
-### Training results
-
-
-
 ### Framework versions
 
 - PEFT 0.11.1
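
Only part of the card's hyperparameter list appears in this hunk (training_steps: 50 and Native AMP mixed precision). As a rough illustration of how those two settings map onto a Transformers training setup, a minimal sketch follows; the output directory, batch size, and every other argument are assumptions, not values from the card.

```python
# Minimal sketch, not the author's training script: only max_steps and the
# mixed-precision flag come from the card above; everything else is assumed.
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments

base = "meta-llama/Meta-Llama-3-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

training_args = TrainingArguments(
    output_dir="llama-3-8B-Instruct-ft",   # assumed output path
    max_steps=50,                          # "training_steps: 50" from the card
    fp16=True,                             # "mixed_precision_training: Native AMP"
    per_device_train_batch_size=1,         # assumed; not listed in this hunk
)
```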
adapter_config.json CHANGED
@@ -20,8 +20,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
     "v_proj",
+    "q_proj",
     "output_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ff4c15d2867570e5ddfc818c5e6cea7b787e5216c40b0a5774b18e8e5cca6ac
+oid sha256:198120465bc9e07ef7baf57ab4658276730b35bf160df3b57c764c0e7caa0e4e
 size 4311795296
special_tokens_map.json CHANGED
@@ -13,11 +13,5 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "[PAD]"
 }
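
The pad token entry collapses from a full AddedToken mapping to a plain string. In Transformers, that is typically the form written out when the token is registered as a bare string rather than an AddedToken object; a minimal sketch is shown below, where the save path is an assumption.

```python
# Minimal sketch: registering the pad token as a plain string typically
# serializes it as a string in special_tokens_map.json; the output
# directory is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
tokenizer.save_pretrained("llama-3-8B-Instruct-ft")
```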
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:833e5f5177ac5dbb8ce5c28f8fd4b01777ce8545b526179c5d2a95c9521d0082
+oid sha256:289ca0e958f2093fd7fe04571bb05c8253f22af3166a46ae71ee9da0a850125e
 size 5368