spachava committed
Commit 5c4f9ea
1 Parent(s): 9fca4a5

Upload CompressedLlamaForCausalLM

config.json CHANGED
@@ -19,8 +19,10 @@
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 10000.0,
-  "share_layers": "none",
   "tie_word_embeddings": false,
+  "tied_layers": {
+    "1": 0
+  },
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "use_cache": true,
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cde221462593019af025faf5cce19f0bcf432f6106d8d4f6193ef65dde77dec
-size 4993264136
+oid sha256:410627979de4319c4623cd5dbc988483c182d26e893f6d230a6ef6134bd482a7
+size 4923632128
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:da989a50e26cc2e59c9c546b439ea6d3cedbf2090068f1159b61bfde743970ba
-size 4997386488
+oid sha256:2d81c6fa49d323e616f26b3a824a2f73049c2596008583b13a3d7a38115647a4
+size 4956426376
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77e75f2f2edceb7bee4131d7e189af5f09aaa91d3f9b95b62b53811fb671293b
-size 3715271008
+oid sha256:2c138227dedead48e31613d734e633baf4096b895901d045ef51944dd19c2631
+size 3494086776
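Note: these three files are Git LFS pointers, not the tensors themselves; each records only the sha256 of the real shard and its byte size, so the diff just swaps one pointer for another. After downloading a shard you can check it against its pointer. A small verification sketch, using the new pointer values for model-00001-of-00003.safetensors above and assuming the shard sits in the working directory:

# Sketch: verify a downloaded shard against its Git LFS pointer.
import hashlib

def verify_lfs_pointer(path, expected_oid, expected_size):
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values taken from the new pointer above.
ok = verify_lfs_pointer(
    "model-00001-of-00003.safetensors",
    "410627979de4319c4623cd5dbc988483c182d26e893f6d230a6ef6134bd482a7",
    4923632128,
)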
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 13705894400
+    "total_size": 13374118400
   },
   "weight_map": {
     "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -15,9 +15,6 @@
     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
@@ -106,8 +103,8 @@
     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.19.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.19.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
-    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
-    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
@@ -232,11 +229,11 @@
     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.norm.weight": "model-00003-of-00003.safetensors"