INC4AI committed (verified)
Commit a4f27ab · Parent: e5781dd

Upload folder using huggingface_hub
config.json CHANGED
@@ -91,195 +91,257 @@
   "partial_rotary_factor": 0.5,
   "qk_norm_type": "per_layer",
   "quantization_config": {
-     "autoround_version": "0.12.0",
+     "autoround_version": "0.10.0",
      "bits": 4,
      "data_type": "int",
      "extra_config": {
        "model.layers.0.block_sparse_moe.gate": {
-         "bits": 8
+         "bits": 16,
+         "data_type": "float"
        },
        … (the identical change — "bits": 8 replaced by "bits": 16 plus "data_type": "float" — repeats for every remaining gate, model.layers.1 through model.layers.60) …
        "model.layers.61.block_sparse_moe.gate": {
-         "bits": 8
+         "bits": 16,
+         "data_type": "float"
        }
      },
      "group_size": 128,
model-00001-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ac7ccdfbcf8f0f716981ee073d3da4514979755a9dccc4304747f0c09f777863
- size 5369490592
+ oid sha256:6e5ae5e9ac1ccf7b1f5ff26f3734ed5b5d5d637e947e288896b8cc69093134fa
+ size 5369526784

model-00002-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ba97056e3bb75a0a5aaaa1b0a0cb2ab062883c721c1476063fb35cfb9652470
- size 5369490592
+ oid sha256:4e98c3eb396051ca1a2d833e64901cb8602e9fb407ca6b17364a10358ec483bc
+ size 5369526784

model-00003-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:816ac5aa324c319fd7a1217601c230a071c7d7d3f9406ae6bbf493359780af41
- size 5369490592
+ oid sha256:16cc4f34ec3ab47d3265457363d9a2d4c23ee881baf03afc849897abf194bed5
+ size 5369526784

model-00004-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c00a8f7fe0dfb8580e4ed6485ec6f1b8cf39153c40a508226685ab17499f6df
- size 5369493832
+ oid sha256:04ccfe774e847732281b49f210032f089201b8b671304477e8e12305decc5e44
+ size 5369530008

model-00005-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:273bc78e70bb10f788576577f15d6b0d60dda145663c4a866a7670f76565b7cf
- size 5369497552
+ oid sha256:f21afde16ccf5f4e7969827118d80a8ec7c163649cf52423911e89d0625d21d4
+ size 5369533728

model-00006-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad57849acd81f99552eb1781a644b697407a57229f77050feb7f5f90f4f50330
- size 5367877136
+ oid sha256:45a84726e5323c4b98c2b0816cb705aa7d267805e00c045df16a57712c6cabbd
+ size 5368718552

model-00007-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:79342625686be14f8cfd147ba43283da4edc89313d568693f64aab553f259a9b
- size 5369497120
+ oid sha256:56b2b366334aea8786dcc9902c9dd23c0263cdfdcb57ea6b4bd9221aafd68d63
+ size 5369533304

model-00008-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0237ef0a1e558673ab4530e809fb45bd609a1be4fee34ebb6269676988ff86af
- size 5369497120
+ oid sha256:56a39bb13d9d9e1a2d017a40fa29728daf03cf375cc9667e2205af38878541e3
+ size 5369533304

model-00009-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:67ad6d8ad67d8a51c290115412a76da49ae1bc3d3de554bbc005ecd9368c8542
- size 5369497248
+ oid sha256:a5f36339f4df242ee214fb377cded9b226a9829b7696560cf38316433eb05404
+ size 5369533400

model-00010-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69155430d9b29a777fd318831b2cd58e052f9c4ef1568de68602e4b630ae11d0
- size 5369497552
+ oid sha256:0053868da8dcb6b6a72b2f424613bae04801e7e5f164bc83d4994e46eda2468e
+ size 5369533728

model-00011-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1130cdf7b9c2ff3607abfa17a86d505e6b7fd8388a94e3b1e0a934a197a6e3b
- size 5367877320
+ oid sha256:e28f7d73551146fcc7772bc0c6a271aa27fc4666670b4451f4ef7075c5b8ba92
+ size 5368718752

model-00012-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad87c79aaf8b433ccdef76d7611a73181b71c1cf873ea9f35a3c5ef0691e00de
- size 5369497120
+ oid sha256:a6bf87fd716ac5b6a2a469481ed84036d5d08bc5600c3602787a2e0270c91969
+ size 5369533304

model-00013-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3935d2640f537adc7365c32f03b7e62e3d544ce0abbe95d541deccfc5fe4132f
- size 5369497120
+ oid sha256:382418825b31937eac99c51ddb91cd7fe644c6b1e848ddad2d885acec8283684
+ size 5369533304

model-00014-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b35ec88a46795b5608610c6ec7f948f8851110bf6d972dcf1480ca5d93ecee0
- size 5369497120
+ oid sha256:b5171316370432a2daec4ba3558aec04145517c065c70009c10acfc8087a8272
+ size 5369533304

model-00015-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94c6b72fd6057147aa896d0df469d9d6bbb42e6bcaa12c1c70186ed385a68710
- size 5369497488
+ oid sha256:26e9a9b914ffd05766a87136fc8da928d3bad75b5654fd65fa7742f115640df8
+ size 5369533624

model-00016-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6bf054f3f80d90b77abbb19858031bcfff76ced7117dac966f9f40a700e872a7
- size 5369497552
+ oid sha256:71d962ef8b55d7d1fa1b64ee1a1730bd28d6fa443255e671977672064aaadd61
+ size 5369533728

model-00017-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:03caa0781ee0f767c121a9aa8fcd45fc898f42ac7d36fff17a24ac688a009f11
- size 5367877072
+ oid sha256:74d3dd0945cbce9cb7f3cde174a5ce9bdabba217a48e09fc680e402df25f414b
+ size 5368718528

model-00018-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f5583b36d5e861574406d582621f2ba279a3c160c7e334baf6c917becdd9a77c
- size 5369497120
+ oid sha256:0c641616d4f03ef3194b1d6bff11a51de5b170844a4a4c68eda2f01f33a8ec33
+ size 5369533304

model-00019-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:76cd64b81f6c8d45ddd870a802a4fe2670e4aacee50c4f677e96036836505d6c
- size 5369497120
+ oid sha256:a1395afe0043c0141b40a3f103ed0725fb783e3e7b5678d71f64493b99514cc9
+ size 5369533304

model-00020-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a458f28239666aea5534261f5e387cf28caf720391fad6b5ec0d0aabe5e6e11
- size 5369497304
+ oid sha256:42d564ef235248ee25dd7550831bdf0acb54275e1e63a621af4df7234ca1efff
+ size 5369533424

model-00021-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd85423b16cf7a337cb36fd93fd72a5170d67565b3f1f0f3e6a6dcf9f8c94382
- size 5369497552
+ oid sha256:161e46898b1d19d5c1144204f3cdc01b39667af407656a77ae7396f6d69932bd
+ size 5369533728

model-00022-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:752e0e58ef03923e33d1336cc61178486d18b3be38306430a5b3d883f28ef38e
- size 5367877264
+ oid sha256:7e07ee08f4f19ba41ffd0608fdc26f56eeacc8dcc8fd65c405a428bcb5af9cd6
+ size 5368718728

model-00023-of-00023.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ce66ef48736486420112b1fcd566664e7cba390bf8483d9c955864f96660b9f
- size 2553281576
+ oid sha256:04e5bab846c3dbc86ff19effb026d8dab289191441f24b90ef148a6836823332
+ size 2596866240

model.safetensors.index.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c2c76f965bec7f8f91ae1e10c70258cc1604de2ae20462488cbfd1cd8b1c62f5
- size 14068102
+ oid sha256:6ff90e975bec219135b35185377f3369c0497cea08d4b4f551743970c5e77ce7
+ size 14057148
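
Each .safetensors entry above is a Git LFS pointer: the repo itself stores only the spec version, a sha256 oid of the real payload, and its byte size, so the diff records new digests and slightly larger shards (the 16-bit gate weights add roughly 36 KB to most shards). A quick way to check a downloaded shard against its pointer, sketched under the assumption that the shard sits in the current directory:

# Sketch: verify a downloaded shard against its Git LFS pointer.
# The expected values below are the new pointer fields for
# model-00001-of-00023.safetensors, copied from the diff above.
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so multi-GB shards fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected_oid = "6e5ae5e9ac1ccf7b1f5ff26f3734ed5b5d5d637e947e288896b8cc69093134fa"
expected_size = 5369526784

path = "model-00001-of-00023.safetensors"  # assumed local download path
assert os.path.getsize(path) == expected_size
assert sha256_of(path) == expected_oid
print("shard matches its LFS pointer")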
modeling_minimax_m2.py ADDED
@@ -0,0 +1,706 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_minimax_m2.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from collections.abc import Callable
from typing import Optional, Union, Unpack

import torch
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_kernel_forward_from_hub
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import (
    GenericForQuestionAnswering,
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
from transformers.utils.deprecation import deprecate_kwarg
from transformers.utils.generic import OutputRecorder, check_model_inputs
from .configuration_minimax_m2 import MiniMaxM2Config


class MiniMaxM2MLP(nn.Module):
    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.ffn_dim = config.intermediate_size
        self.hidden_dim = config.hidden_size

        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
        current_hidden_states = self.w2(current_hidden_states)
        return current_hidden_states


class MiniMaxM2Experts(nn.ModuleList):
    """
    ModuleList of experts.
    """

    def __init__(self, config: MiniMaxM2Config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_local_experts
        for _ in range(self.num_experts):
            self.append(MiniMaxM2MLP(config))

    def forward(
        self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
    ) -> torch.Tensor:
        """
        Args:
            hidden_states: (batch_size * sequence_length, hidden_dim)
            selected_experts: (batch_size * sequence_length, top_k)
            routing_weights: (batch_size * sequence_length, top_k)
        Returns:
            (batch_size * sequence_length, hidden_dim)
        """
        final_hidden_states = torch.zeros_like(hidden_states)
        expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)

        expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
        for expert_idx in expert_hit:
            idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
            current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
            current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
        return final_hidden_states


class MiniMaxM2SparseMoeBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.jitter_noise = config.router_jitter_noise
        self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
        self.experts = MiniMaxM2Experts(config)
        self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))

    def route_tokens_to_experts(self, router_logits):
        routing_weights = torch.nn.functional.sigmoid(router_logits.float())
        scores_for_choice = routing_weights + self.e_score_correction_bias
        _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
        top_k_weights = routing_weights.gather(1, top_k_index)
        top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
        return top_k_index, top_k_weights.to(router_logits.dtype)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        router_logits = self.gate(hidden_states)
        top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states, router_logits


@use_kernel_forward_from_hub("RMSNorm")
class MiniMaxM2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MiniMaxM2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Keep half or full tensor for later concatenation
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the first half or full tensor
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class MiniMaxM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
            self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        key_states = key_states.view(hidden_shape)
        query_states = query_states.view(hidden_shape)
        value_states = value_states.view(hidden_shape)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MiniMaxM2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MiniMaxM2Attention(config, layer_idx)

        self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
        self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, _ = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


class MiniMaxM2RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: MiniMaxM2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class MiniMaxM2PreTrainedModel(PreTrainedModel):
    config: MiniMaxM2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MiniMaxM2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
    _supports_attention_backend = True
    _can_record_outputs = {
        "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
        "hidden_states": MiniMaxM2DecoderLayer,
        "attentions": MiniMaxM2Attention,
    }


@auto_docstring
class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
    def __init__(self, config: MiniMaxM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each experts
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each experts
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts


@auto_docstring
class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = MiniMaxM2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM

        >>> model = MiniMaxM2ForCausalLM.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
    pass


class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
    pass


class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
    pass


__all__ = [
    "MiniMaxM2ForCausalLM",
    "MiniMaxM2ForQuestionAnswering",
    "MiniMaxM2Model",
    "MiniMaxM2PreTrainedModel",
    "MiniMaxM2ForSequenceClassification",
    "MiniMaxM2ForTokenClassification",
]
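
Because modeling_minimax_m2.py now ships inside the repo, the checkpoint can be loaded as custom code via trust_remote_code. A minimal usage sketch (the repo id, prompt, and generation settings below are assumptions, not part of this commit):

# Sketch: load this quantized checkpoint with the bundled custom modeling code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "INC4AI/MiniMax-M2-int4"  # hypothetical repo id for this checkpoint

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype="auto",      # respect the dtypes recorded in the checkpoint
    device_map="auto",       # shard the 23 safetensors files across devices
    trust_remote_code=True,  # picks up modeling_minimax_m2.py added above
)

inputs = tokenizer("Hello, MiniMax M2!", return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))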
quantization_config.json CHANGED
@@ -4,195 +4,257 @@
   "group_size": 128,
   "sym": true,
   "iters": 0,
-  "autoround_version": "0.12.0",
+  "autoround_version": "0.10.0",
   "quant_method": "auto-round",
   "packing_format": "auto_round:auto_gptq",
   "extra_config": {
     "model.layers.0.block_sparse_moe.gate": {
-      "bits": 8
+      "bits": 16,
+      "data_type": "float"
    },
    … (the identical change — "bits": 8 replaced by "bits": 16 plus "data_type": "float" — repeats for every remaining gate, model.layers.1 through model.layers.60) …
    "model.layers.61.block_sparse_moe.gate": {
-      "bits": 8
+      "bits": 16,
+      "data_type": "float"
    }
  }
 }
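
This file mirrors the quantization_config block in config.json: an INT4, group-size-128, symmetric AutoRound recipe with "iters": 0 (RTN-style rounding, no tuning passes), packed in the auto_round:auto_gptq format, with all 62 router gates excluded from quantization. A minimal sketch of how such a mixed-precision recipe might be expressed with AutoRound's layer_config; the exact API can differ between auto-round releases, and the source checkpoint name is an assumption:

# Sketch only: reproduce a 4-bit AutoRound recipe that keeps the MoE router
# gates in 16-bit float, matching the updated quantization_config above.
from auto_round import AutoRound  # assumes the auto-round package is installed
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "MiniMaxAI/MiniMax-M2"  # hypothetical source checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Router gates are tiny but routing-sensitive: keep them out of INT4.
layer_config = {
    f"model.layers.{i}.block_sparse_moe.gate": {"bits": 16, "data_type": "float"}
    for i in range(62)  # 62 decoder layers, per the config above
}

autoround = AutoRound(
    model,
    tokenizer,
    bits=4,
    group_size=128,
    sym=True,
    iters=0,  # iters=0 -> RTN-style rounding, matching "iters": 0 above
    layer_config=layer_config,
)
autoround.quantize_and_save("./MiniMax-M2-int4", format="auto_round")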