Commit 79515e1 by danielhanchen
Parent: b8c2c9c

Upload MistralForCausalLM
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 language:
 - en
-license: mit
 library_name: transformers
+license: mit
 tags:
 - unsloth
 - phi3
config.json CHANGED
@@ -20,7 +20,7 @@
   "sliding_window": 2048,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.41.0.dev0",
+  "transformers_version": "4.43.0.dev0",
   "use_cache": true,
   "vocab_size": 32064
 }
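The transformers_version bump above only records which library version produced the files; the architecture fields are unchanged. As a quick sanity check, a minimal Python sketch (assuming the transformers library is installed; the repo id "unsloth/Phi-3-mini-4k-instruct" is a placeholder for this repository):

from transformers import AutoConfig

# Placeholder repo id; substitute this repository's actual path.
config = AutoConfig.from_pretrained("unsloth/Phi-3-mini-4k-instruct")
print(config.sliding_window)  # expected: 2048
print(config.vocab_size)      # expected: 32064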
generation_config.json CHANGED
@@ -1,11 +1,7 @@
 {
   "_from_model_config": true,
   "bos_token_id": 1,
-  "eos_token_id": [
-    32000,
-    32001,
-    32007
-  ],
-  "pad_token_id": 32009,
-  "transformers_version": "4.41.0"
+  "eos_token_id": 32000,
+  "pad_token_id": 32000,
+  "transformers_version": "4.43.0.dev0"
 }
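The change above collapses the multi-token eos_token_id list ([32000, 32001, 32007]) to the single id 32000 and points pad_token_id at the same token. A minimal sketch to confirm what generation will use after this commit (same placeholder repo id as above):

from transformers import GenerationConfig

# Placeholder repo id; substitute this repository's actual path.
gen_config = GenerationConfig.from_pretrained("unsloth/Phi-3-mini-4k-instruct")
print(gen_config.eos_token_id)  # expected: 32000
print(gen_config.pad_token_id)  # expected: 32000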
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56a30eb57dba0316d47563c10e56c2766c0ca8268bb9aeddc2cd6425b0eee18d
-size 3997254704
+oid sha256:3403fc7e50e4a23a265a54ee7c74c465be32dcf93371d5346d38b9a724f0546f
+size 4991370968
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3fa1b757c69eb9a1ea2f14e41be77eb554f79378a0a450b343dc35848d820238
-size 3644938080
+oid sha256:79e3d00e5431b1b409d0222aea0daa244f715dc8a030b0dbc5e44be9b7353112
+size 2650821816
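Both shard updates above are git-lfs pointer files: oid is the SHA-256 digest of the real payload and size is its byte count. A small stdlib-only sketch for verifying a downloaded shard against its pointer:

import hashlib
from pathlib import Path

def lfs_pointer_fields(path: str) -> tuple[str, int]:
    # Stream the file so multi-GB shards are not loaded into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest(), Path(path).stat().st_size

oid, size = lfs_pointer_fields("model-00001-of-00002.safetensors")
print(f"oid sha256:{oid}")  # expected to start with 3403fc7e after this commit
print(f"size {size}")       # expected: 4991370968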
model.safetensors.index.json CHANGED
@@ -77,42 +77,42 @@
   "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-  "model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+  "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-  "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+  "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-  "model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+  "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
   "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
@@ -122,23 +122,23 @@
   "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-  "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+  "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+  "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
   "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
   "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
   "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
   "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-  "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+  "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-  "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+  "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
   "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
   "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
   "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",