Lugaborg committed on
Commit
35ea1da
1 Parent(s): fc9a6fb

Upload LlamaForCausalLM

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/home/lunarsylph/_code_/neuralpantheon/output-model-directory",
+  "_name_or_path": "/home/lunarsylph/_code_/neuralpantheon/__PEFT__/output",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -24,5 +24,5 @@
   "torch_dtype": "bfloat16",
   "transformers_version": "4.40.0",
   "use_cache": false,
-  "vocab_size": 128256
+  "vocab_size": 100290
 }
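
Note: the config diff above points "_name_or_path" at a PEFT output directory and shrinks "vocab_size" from 128256 to 100290. A minimal sketch (not part of this commit; the repo id below is a hypothetical placeholder) for checking that the uploaded checkpoint, its tokenizer, and the new vocab_size agree:

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/your-model"  # hypothetical placeholder, not the real repo

config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# The number of rows in the input embedding matrix must equal config.vocab_size;
# the tokenizer may be smaller if the embedding was padded.
embed_rows = model.get_input_embeddings().weight.shape[0]
print("config.vocab_size:", config.vocab_size)
print("tokenizer length: ", len(tokenizer))
print("embedding rows:   ", embed_rows)
assert config.vocab_size == embed_rows, "config.vocab_size should match the embedding matrix"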
generation_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_from_model_config": true,
   "bos_token_id": 128000,
+  "do_sample": true,
   "eos_token_id": 128001,
-  "transformers_version": "4.40.0",
-  "use_cache": false
+  "transformers_version": "4.40.0"
 }
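
Note: the generation config now sets "do_sample": true and drops "use_cache". A minimal sketch (illustrative only; the repo id is a hypothetical placeholder) of what that means when calling generate():

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/your-model"  # hypothetical placeholder

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("Hello,", return_tensors="pt")
# do_sample=True is picked up from generation_config.json, so this call samples.
sampled = model.generate(**inputs, max_new_tokens=20)
# A per-call override switches back to greedy decoding.
greedy = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(tokenizer.decode(sampled[0], skip_special_tokens=True))
print(tokenizer.decode(greedy[0], skip_special_tokens=True))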
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59d1e235a8553bc7f486595046c10b30372f7eeeede7a73c49dc169ff97be325
-size 4976698672
+oid sha256:2fd41cc3c98e42778cc2d30acf1b5770f495ddb6944c38ee0be464077db0297a
+size 4948928368
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8a387b1252e856e997b569014e198e342daed4a2690964faa14877360858c88
-size 4999802720
+oid sha256:cc70cac42a1a20102a8681347bff66d6b2a6ab0f0ecdfdba82cc2d0d44cfaeff
+size 4915916168
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2d5b1298d13884312eea71d47a3841ee8335058dc2c39a559eb9688bbf23879
-size 4915916176
+oid sha256:30cb6a3c3e5aad28615fd291b2ea79a67ea9922db412b0dca834669328978abd
+size 4915941072
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8abc53444813a8946947f9d3db0c5db49b33b074d2f82d1aa41e6efff2d13d0f
-size 1168138808
+oid sha256:b75672657c68183f4f1cde84a7aa352e98bb7db287e900a064e5621a6e17a384
+size 821575808
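
Note: each of the four shard entries above is a Git LFS pointer, so only the sha256 oid and byte size appear in the diff; the shards themselves were rewritten. A minimal sketch (assuming the shards are downloaded locally) to verify a shard against its pointer, using the new values for shard 1:

import hashlib
import os

def verify_shard(path: str, expected_sha256: str, expected_size: int) -> bool:
    # Fast check first: the byte size must match the LFS pointer.
    if os.path.getsize(path) != expected_size:
        return False
    # Then hash the file in 1 MiB chunks and compare against the pointer oid.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

ok = verify_shard(
    "model-00001-of-00004.safetensors",
    "2fd41cc3c98e42778cc2d30acf1b5770f495ddb6944c38ee0be464077db0297a",
    4948928368,
)
print("shard 1 matches its LFS pointer:", ok)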
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 16060522496
+    "total_size": 15602327552
   },
   "weight_map": {
     "lm_head.weight": "model-00004-of-00004.safetensors",
@@ -125,7 +125,7 @@
     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
@@ -230,11 +230,11 @@
     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
@@ -286,13 +286,13 @@
     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
-    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-    "model.norm.weight": "model-00004-of-00004.safetensors"
+    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.norm.weight": "model-00003-of-00004.safetensors"
   }
 }
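
Note: the index diff rebalances the weight map: "total_size" drops to 15602327552 and several tensors (parts of layers 9 and 31, plus model.norm) move to earlier shards. A minimal sketch (assuming a local clone of the repo) that summarizes the updated index:

import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Reported total parameter bytes across all shards.
print("total_size:", index["metadata"]["total_size"])

# How many tensors the weight_map assigns to each shard file.
per_shard = Counter(index["weight_map"].values())
for shard, n_tensors in sorted(per_shard.items()):
    print(f"{shard}: {n_tensors} tensors")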