dwb2023 committed
Commit aa0ee8d
1 Parent(s): bee5696

dwb2023/llama38binstruct_summarize_v3
README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [NousResearch/Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 2.0847
+- Loss: 1.9957
 
 ## Model description
 
@@ -50,12 +50,12 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch  | Step | Validation Loss |
-|:-------------:|:------:|:----:|:---------------:|
-| 1.4079        | 1.1905 | 25   | 1.4325          |
-| 0.3935        | 2.3810 | 50   | 1.6786          |
-| 0.3836        | 3.5714 | 75   | 1.7694          |
-| 0.1039        | 4.7619 | 100  | 2.0847          |
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 1.4216        | 1.25  | 25   | 1.4108          |
+| 0.5039        | 2.5   | 50   | 1.6955          |
+| 0.183         | 3.75  | 75   | 1.8566          |
+| 0.1127        | 5.0   | 100  | 1.9957          |
 
 
 ### Framework versions
@@ -63,5 +63,5 @@ The following hyperparameters were used during training:
 - PEFT 0.11.1
 - Transformers 4.41.2
 - Pytorch 2.3.0+cu121
-- Datasets 2.19.2
+- Datasets 2.20.0
 - Tokenizers 0.19.1
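
For context, a minimal usage sketch of the checkpoint this card describes, assuming the repo hosts a PEFT adapter on top of the base model named above (a quantized load matching the recorded settings is sketched after the config.json diff below):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model, then attach the fine-tuned adapter from this repo.
base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Meta-Llama-3-8B-Instruct", device_map="auto"
)
model = PeftModel.from_pretrained(base, "dwb2023/llama38binstruct_summarize_v3")
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
```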
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
-    "k_proj",
     "o_proj",
-    "down_proj",
     "v_proj",
+    "q_proj",
+    "down_proj",
     "gate_proj",
-    "q_proj"
+    "k_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5eb2ba5bd31fe265e089ab86d5ca7f0167c063cc985c997332e6d56c04fb999
-size 167832240
+oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
+size 48
config.json CHANGED
@@ -6,7 +6,7 @@
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 128000,
-  "eos_token_id": 128001,
+  "eos_token_id": 128009,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
@@ -18,12 +18,27 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "pretraining_tp": 1,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "float16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
   "transformers_version": "4.41.2",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 128256
 }
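
Two substantive changes here: eos_token_id moves from 128001 (Llama 3's <|end_of_text|>) to 128009, the <|eot_id|> turn-end token used by the Instruct chat template, and a bitsandbytes 4-bit NF4 quantization_config is now recorded. A minimal sketch of a load call matching the recorded settings, using the base model id from the README above:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization_config block recorded in config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

model = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Meta-Llama-3-8B-Instruct",
    quantization_config=bnb_config,
    device_map="auto",
)
```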
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6724f0c2a41ca11368fe260624fa760a1b7ac1d936f76b17ba92b652dc150ec1
+size 4997579690
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5719f4c9810195ddf264a0868adaa25a2802b31c5acce12347a51e6a8dd2d1cd
+size 2807050004
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
runs/Jun14_05-45-11_2324becbf0e1/events.out.tfevents.1718343913.2324becbf0e1.965.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39b2ad3a770376634e6269c0288bd4673324837d60d18330c6404c2e9ecaf5c2
+size 9238
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88ec13acf05418ea06f7ef2cb995791d2f2ad262c2272593cce204cea317952b
+oid sha256:ddeed7fc25a7b2d97345ae61c62c8ef1296bab1c3c6a1c366bd84db2d6c05323
 size 5368