haoyang-amd committed on
Commit
9b118ac
·
1 Parent(s): ba4fa61
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "</minimax:tool_call>": 200053,
+   "</think>": 200051,
+   "<add_file>": 200036,
+   "<code_context>": 200043,
+   "<code_interpreter>": 200023,
+   "<commit_after>": 200018,
+   "<commit_before>": 200016,
+   "<commit_message>": 200040,
+   "<commit_msg>": 200017,
+   "<delete_file>": 200037,
+   "<edit_file>": 200039,
+   "<empty_output>": 200015,
+   "<empty_source_file>": 200041,
+   "<file_content>": 200044,
+   "<file_sep>": 200049,
+   "<filename>": 200006,
+   "<filepath>": 200048,
+   "<fim_middle>": 200002,
+   "<fim_pad>": 200004,
+   "<fim_prefix>": 200001,
+   "<fim_suffix>": 200003,
+   "<function_call>": 200022,
+   "<gh_stars>": 200007,
+   "<issue_closed>": 200010,
+   "<issue_comment>": 200009,
+   "<issue_start>": 200008,
+   "<jupyter_code>": 200013,
+   "<jupyter_error>": 200035,
+   "<jupyter_output>": 200014,
+   "<jupyter_start>": 200011,
+   "<jupyter_text>": 200012,
+   "<minimax:tool_call>": 200052,
+   "<pr_start>": 200046,
+   "<rename_file>": 200038,
+   "<repo_struct>": 200042,
+   "<reponame>": 200005,
+   "<review_comment>": 200047,
+   "<source_files>": 200045,
+   "<think>": 200050,
+   "[e~[": 200020,
+   "]!d~[": 200021,
+   "]!p~[": 200000,
+   "]<]end of image[>[": 200030,
+   "]<]end of speech[>[": 200028,
+   "]<]end of video[>[": 200032,
+   "]<]image[>[": 200025,
+   "]<]speech[>[": 200024,
+   "]<]start of image[>[": 200029,
+   "]<]start of speech[>[": 200027,
+   "]<]start of video[>[": 200031,
+   "]<]video[>[": 200026,
+   "]<]vision pad[>[": 200033,
+   "]~!b[": 200034,
+   "]~b]": 200019
+ }
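
The map above pins every special token to an ID at or above 200000 (the vocabulary ends at 200064 per config.json). A minimal sketch for checking the mapping at runtime, assuming only that the repo loads with `AutoTokenizer`; the repo id below is a placeholder:

```python
# Minimal sketch: confirm a few of the special-token IDs listed above
# (repo id is a placeholder).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

# ']~b]' opens a turn and '[e~[' closes one (see chat_template.jinja below);
# added_tokens.json maps them to 200019 and 200020.
for token in ["]~b]", "[e~[", "<think>", "</think>", "<minimax:tool_call>"]:
    print(token, tok.convert_tokens_to_ids(token))
```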
chat_template.jinja ADDED
@@ -0,0 +1,159 @@
+ {# ---------- special token variables ---------- #}
+ {%- set toolcall_begin_token = '<minimax:tool_call>' -%}
+ {%- set toolcall_end_token = '</minimax:tool_call>' -%}
+ {#- Tool Rendering Functions ============================================== -#}
+ {%- macro render_tool_namespace(namespace_name, tool_list) -%}
+ {%- for tool in tool_list -%}
+ <tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
+ {% endfor -%}
+ {%- endmacro -%}
+ {%- macro visible_text(content) -%}
+ {%- if content is string -%}
+ {{ content }}
+ {%- elif content is iterable and content is not mapping -%}
+ {%- for item in content -%}
+ {%- if item is mapping and item.type == 'text' -%}
+ {{- item.text }}
+ {%- elif item is string -%}
+ {{- item }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- content }}
+ {%- endif -%}
+ {%- endmacro -%}
+ {#- System Message Construction ============================================ -#}
+ {%- macro build_system_message(system_message) -%}
+ {%- if system_message and system_message.content -%}
+ {{- visible_text(system_message.content) }}
+ {%- else -%}
+ {%- if model_identity is not defined -%}
+ {%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.7 and is built by MiniMax." -%}
+ {%- endif -%}
+ {{- model_identity }}
+ {%- endif -%}
+
+ {#- Handle current_date -#}
+ {%- if system_message and system_message.current_date -%}
+ {{- '\n' ~ 'Current date: ' + system_message.current_date }}
+ {%- endif -%}
+ {#- Handle current_location -#}
+ {%- if system_message and system_message.current_location -%}
+ {{- '\n' ~ 'Current location: ' + system_message.current_location }}
+ {%- endif -%}
+ {%- endmacro -%}
+ {#- Main Template Logic ================================================= -#}
+ {#- Extract system message (only first message if it's system) -#}
+ {%- set system_message = none -%}
+ {%- set conversation_messages = messages -%}
+ {%- if messages and messages[0].role == "system" -%}
+ {%- set system_message = messages[0] -%}
+ {%- set conversation_messages = messages[1:] -%}
+ {%- endif -%}
+ {#- Get the last user message turn, for interleaved thinking -#}
+ {%- set ns = namespace(last_user_index=-1) %}
+ {% for m in conversation_messages %}
+ {%- if m.role == 'user' %}
+ {% set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+ {%- endfor %}
+ {#- Render system message -#}
+ {{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+ {{- build_system_message(system_message) }}
+ {#- Render tools if available -#}
+ {%- if tools -%}
+ {{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+ {{- '\n' ~ '<tools>' ~ '\n' }}
+ {{- render_tool_namespace("functions", tools) }}
+ {{- '</tools>' ~ '\n\n' }}
+ {{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+ {{- '\n' ~ toolcall_begin_token }}
+ <invoke name="tool-name-1">
+ <parameter name="param-key-1">param-value-1</parameter>
+ <parameter name="param-key-2">param-value-2</parameter>
+ ...
+ </invoke>
+ {{- '\n' ~ toolcall_end_token }}
+ {%- endif -%}
+ {{- '[e~[\n' }}
+
+ {#- Render messages -#}
+ {%- set last_tool_call = namespace(name=none) -%}
+ {%- for message in conversation_messages -%}
+ {%- if message.role == 'assistant' -%}
+ {#- Only render reasoning_content if no user message follows -#}
+ {{- ']~b]ai' ~ '\n' }}
+
+ {%- set reasoning_content = '' %}
+ {%- set content = visible_text(message.content) %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
+ {%- set content = content.split('</think>')[-1].strip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+ {{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
+ {%- endif -%}
+ {%- if content -%}
+ {{- content }}
+ {%- endif -%}
+ {%- if message.tool_calls -%}
+ {{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+ {%- for tool_call in message.tool_calls -%}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<invoke name="' + tool_call.name + '">' }}
+ {% set _args = tool_call.arguments %}
+ {%- for k, v in _args.items() %}
+ {{- '<parameter name="' + k + '">' }}
+ {{- v | tojson(ensure_ascii=False) if v is not string else v }}
+ {{- '</parameter>' }}
+ {% endfor %}
+ {{- '</invoke>' ~ '\n' }}
+ {%- endfor -%}
+
+ {{- toolcall_end_token}}
+ {%- set last_tool_call.name = message.tool_calls[-1].name -%}
+ {%- else -%}
+ {%- set last_tool_call.name = none -%}
+ {%- endif -%}
+ {{- '[e~[' ~ '\n' }}
+
+ {%- elif message.role == 'tool' -%}
+ {%- if last_tool_call.name is none -%}
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+ {%- endif -%}
+ {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+ {{- ']~b]tool' }}
+ {%- endif -%}
+ {%- if message.content is string -%}
+ {{- '\n<response>' }}
+ {{- message.content }}
+ {{- '</response>' }}
+ {%- else -%}
+ {%- for tr in message.content -%}
+ {{- '\n<response>' }}
+ {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+ {{- '\n</response>' }}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+ {{- '[e~[\n' -}}
+ {%- endif -%}
+
+ {%- elif message.role == 'user' -%}
+ {{- ']~b]user' ~ '\n' }}
+ {{- visible_text(message.content) }}
+ {{- '[e~[' ~ '\n' }}
+ {%- endif -%}
+ {%- endfor -%}
+
+ {#- Generation prompt -#}
+ {%- if add_generation_prompt -%}
+ {{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
+ {%- endif -%}
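
To see exactly what this template emits, it can be rendered without tokenization. A minimal sketch, assuming the repo loads with `AutoTokenizer` and `trust_remote_code` (the repo id is a placeholder):

```python
# Minimal sketch: render chat_template.jinja into a raw prompt string
# (repo id is a placeholder).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# tokenize=False returns the rendered text, making the ']~b]...' turn openers,
# the '[e~[' turn terminator, and the trailing ']~b]ai\n<think>\n' generation
# prompt from the template visible.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```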
config.json ADDED
@@ -0,0 +1,481 @@
+ {
+   "architectures": [
+     "MiniMaxM2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "attn_type_list": [
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1,
+     1
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+     "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+   },
+   "bos_token_id": 1,
+   "dtype": "bfloat16",
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "max_position_embeddings": 204800,
+   "model_type": "minimax_m2",
+   "mtp_transformer_layers": 1,
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 62,
+   "num_key_value_heads": 8,
+   "num_local_experts": 256,
+   "num_mtp_modules": 3,
+   "output_router_logits": false,
+   "partial_rotary_factor": 0.5,
+   "qk_norm_type": "per_layer",
+   "quantization_config": {
+     "algo_config": null,
+     "exclude": [
+       "model.layers.0.self_attn.q_proj",
+       "model.layers.0.self_attn.k_proj",
+       "model.layers.0.self_attn.v_proj",
+       "model.layers.0.self_attn.o_proj",
+       "model.layers.0.block_sparse_moe.gate",
+       "model.layers.1.self_attn.q_proj",
+       "model.layers.1.self_attn.k_proj",
+       "model.layers.1.self_attn.v_proj",
+       "model.layers.1.self_attn.o_proj",
+       "model.layers.1.block_sparse_moe.gate",
+       "model.layers.2.self_attn.q_proj",
+       "model.layers.2.self_attn.k_proj",
+       "model.layers.2.self_attn.v_proj",
+       "model.layers.2.self_attn.o_proj",
+       "model.layers.2.block_sparse_moe.gate",
+       "model.layers.3.self_attn.q_proj",
+       "model.layers.3.self_attn.k_proj",
+       "model.layers.3.self_attn.v_proj",
+       "model.layers.3.self_attn.o_proj",
+       "model.layers.3.block_sparse_moe.gate",
+       "model.layers.4.self_attn.q_proj",
+       "model.layers.4.self_attn.k_proj",
+       "model.layers.4.self_attn.v_proj",
+       "model.layers.4.self_attn.o_proj",
+       "model.layers.4.block_sparse_moe.gate",
+       "model.layers.5.self_attn.q_proj",
+       "model.layers.5.self_attn.k_proj",
+       "model.layers.5.self_attn.v_proj",
+       "model.layers.5.self_attn.o_proj",
+       "model.layers.5.block_sparse_moe.gate",
+       "model.layers.6.self_attn.q_proj",
+       "model.layers.6.self_attn.k_proj",
+       "model.layers.6.self_attn.v_proj",
+       "model.layers.6.self_attn.o_proj",
+       "model.layers.6.block_sparse_moe.gate",
+       "model.layers.7.self_attn.q_proj",
+       "model.layers.7.self_attn.k_proj",
+       "model.layers.7.self_attn.v_proj",
+       "model.layers.7.self_attn.o_proj",
+       "model.layers.7.block_sparse_moe.gate",
+       "model.layers.8.self_attn.q_proj",
+       "model.layers.8.self_attn.k_proj",
+       "model.layers.8.self_attn.v_proj",
+       "model.layers.8.self_attn.o_proj",
+       "model.layers.8.block_sparse_moe.gate",
+       "model.layers.9.self_attn.q_proj",
+       "model.layers.9.self_attn.k_proj",
+       "model.layers.9.self_attn.v_proj",
+       "model.layers.9.self_attn.o_proj",
+       "model.layers.9.block_sparse_moe.gate",
+       "model.layers.10.self_attn.q_proj",
+       "model.layers.10.self_attn.k_proj",
+       "model.layers.10.self_attn.v_proj",
+       "model.layers.10.self_attn.o_proj",
+       "model.layers.10.block_sparse_moe.gate",
+       "model.layers.11.self_attn.q_proj",
+       "model.layers.11.self_attn.k_proj",
+       "model.layers.11.self_attn.v_proj",
+       "model.layers.11.self_attn.o_proj",
+       "model.layers.11.block_sparse_moe.gate",
+       "model.layers.12.self_attn.q_proj",
+       "model.layers.12.self_attn.k_proj",
+       "model.layers.12.self_attn.v_proj",
+       "model.layers.12.self_attn.o_proj",
+       "model.layers.12.block_sparse_moe.gate",
+       "model.layers.13.self_attn.q_proj",
+       "model.layers.13.self_attn.k_proj",
+       "model.layers.13.self_attn.v_proj",
+       "model.layers.13.self_attn.o_proj",
+       "model.layers.13.block_sparse_moe.gate",
+       "model.layers.14.self_attn.q_proj",
+       "model.layers.14.self_attn.k_proj",
+       "model.layers.14.self_attn.v_proj",
+       "model.layers.14.self_attn.o_proj",
+       "model.layers.14.block_sparse_moe.gate",
+       "model.layers.15.self_attn.q_proj",
+       "model.layers.15.self_attn.k_proj",
+       "model.layers.15.self_attn.v_proj",
+       "model.layers.15.self_attn.o_proj",
+       "model.layers.15.block_sparse_moe.gate",
+       "model.layers.16.self_attn.q_proj",
+       "model.layers.16.self_attn.k_proj",
+       "model.layers.16.self_attn.v_proj",
+       "model.layers.16.self_attn.o_proj",
+       "model.layers.16.block_sparse_moe.gate",
+       "model.layers.17.self_attn.q_proj",
+       "model.layers.17.self_attn.k_proj",
+       "model.layers.17.self_attn.v_proj",
+       "model.layers.17.self_attn.o_proj",
+       "model.layers.17.block_sparse_moe.gate",
+       "model.layers.18.self_attn.q_proj",
+       "model.layers.18.self_attn.k_proj",
+       "model.layers.18.self_attn.v_proj",
+       "model.layers.18.self_attn.o_proj",
+       "model.layers.18.block_sparse_moe.gate",
+       "model.layers.19.self_attn.q_proj",
+       "model.layers.19.self_attn.k_proj",
+       "model.layers.19.self_attn.v_proj",
+       "model.layers.19.self_attn.o_proj",
+       "model.layers.19.block_sparse_moe.gate",
+       "model.layers.20.self_attn.q_proj",
+       "model.layers.20.self_attn.k_proj",
+       "model.layers.20.self_attn.v_proj",
+       "model.layers.20.self_attn.o_proj",
+       "model.layers.20.block_sparse_moe.gate",
+       "model.layers.21.self_attn.q_proj",
+       "model.layers.21.self_attn.k_proj",
+       "model.layers.21.self_attn.v_proj",
+       "model.layers.21.self_attn.o_proj",
+       "model.layers.21.block_sparse_moe.gate",
+       "model.layers.22.self_attn.q_proj",
+       "model.layers.22.self_attn.k_proj",
+       "model.layers.22.self_attn.v_proj",
+       "model.layers.22.self_attn.o_proj",
+       "model.layers.22.block_sparse_moe.gate",
+       "model.layers.23.self_attn.q_proj",
+       "model.layers.23.self_attn.k_proj",
+       "model.layers.23.self_attn.v_proj",
+       "model.layers.23.self_attn.o_proj",
+       "model.layers.23.block_sparse_moe.gate",
+       "model.layers.24.self_attn.q_proj",
+       "model.layers.24.self_attn.k_proj",
+       "model.layers.24.self_attn.v_proj",
+       "model.layers.24.self_attn.o_proj",
+       "model.layers.24.block_sparse_moe.gate",
+       "model.layers.25.self_attn.q_proj",
+       "model.layers.25.self_attn.k_proj",
+       "model.layers.25.self_attn.v_proj",
+       "model.layers.25.self_attn.o_proj",
+       "model.layers.25.block_sparse_moe.gate",
+       "model.layers.26.self_attn.q_proj",
+       "model.layers.26.self_attn.k_proj",
+       "model.layers.26.self_attn.v_proj",
+       "model.layers.26.self_attn.o_proj",
+       "model.layers.26.block_sparse_moe.gate",
+       "model.layers.27.self_attn.q_proj",
+       "model.layers.27.self_attn.k_proj",
+       "model.layers.27.self_attn.v_proj",
+       "model.layers.27.self_attn.o_proj",
+       "model.layers.27.block_sparse_moe.gate",
+       "model.layers.28.self_attn.q_proj",
+       "model.layers.28.self_attn.k_proj",
+       "model.layers.28.self_attn.v_proj",
+       "model.layers.28.self_attn.o_proj",
+       "model.layers.28.block_sparse_moe.gate",
+       "model.layers.29.self_attn.q_proj",
+       "model.layers.29.self_attn.k_proj",
+       "model.layers.29.self_attn.v_proj",
+       "model.layers.29.self_attn.o_proj",
+       "model.layers.29.block_sparse_moe.gate",
+       "model.layers.30.self_attn.q_proj",
+       "model.layers.30.self_attn.k_proj",
+       "model.layers.30.self_attn.v_proj",
+       "model.layers.30.self_attn.o_proj",
+       "model.layers.30.block_sparse_moe.gate",
+       "model.layers.31.self_attn.q_proj",
+       "model.layers.31.self_attn.k_proj",
+       "model.layers.31.self_attn.v_proj",
+       "model.layers.31.self_attn.o_proj",
+       "model.layers.31.block_sparse_moe.gate",
+       "model.layers.32.self_attn.q_proj",
+       "model.layers.32.self_attn.k_proj",
+       "model.layers.32.self_attn.v_proj",
+       "model.layers.32.self_attn.o_proj",
+       "model.layers.32.block_sparse_moe.gate",
+       "model.layers.33.self_attn.q_proj",
+       "model.layers.33.self_attn.k_proj",
+       "model.layers.33.self_attn.v_proj",
+       "model.layers.33.self_attn.o_proj",
+       "model.layers.33.block_sparse_moe.gate",
+       "model.layers.34.self_attn.q_proj",
+       "model.layers.34.self_attn.k_proj",
+       "model.layers.34.self_attn.v_proj",
+       "model.layers.34.self_attn.o_proj",
+       "model.layers.34.block_sparse_moe.gate",
+       "model.layers.35.self_attn.q_proj",
+       "model.layers.35.self_attn.k_proj",
+       "model.layers.35.self_attn.v_proj",
+       "model.layers.35.self_attn.o_proj",
+       "model.layers.35.block_sparse_moe.gate",
+       "model.layers.36.self_attn.q_proj",
+       "model.layers.36.self_attn.k_proj",
+       "model.layers.36.self_attn.v_proj",
+       "model.layers.36.self_attn.o_proj",
+       "model.layers.36.block_sparse_moe.gate",
+       "model.layers.37.self_attn.q_proj",
+       "model.layers.37.self_attn.k_proj",
+       "model.layers.37.self_attn.v_proj",
+       "model.layers.37.self_attn.o_proj",
+       "model.layers.37.block_sparse_moe.gate",
+       "model.layers.38.self_attn.q_proj",
+       "model.layers.38.self_attn.k_proj",
+       "model.layers.38.self_attn.v_proj",
+       "model.layers.38.self_attn.o_proj",
+       "model.layers.38.block_sparse_moe.gate",
+       "model.layers.39.self_attn.q_proj",
+       "model.layers.39.self_attn.k_proj",
+       "model.layers.39.self_attn.v_proj",
+       "model.layers.39.self_attn.o_proj",
+       "model.layers.39.block_sparse_moe.gate",
+       "model.layers.40.self_attn.q_proj",
+       "model.layers.40.self_attn.k_proj",
+       "model.layers.40.self_attn.v_proj",
+       "model.layers.40.self_attn.o_proj",
+       "model.layers.40.block_sparse_moe.gate",
+       "model.layers.41.self_attn.q_proj",
+       "model.layers.41.self_attn.k_proj",
+       "model.layers.41.self_attn.v_proj",
+       "model.layers.41.self_attn.o_proj",
+       "model.layers.41.block_sparse_moe.gate",
+       "model.layers.42.self_attn.q_proj",
+       "model.layers.42.self_attn.k_proj",
+       "model.layers.42.self_attn.v_proj",
+       "model.layers.42.self_attn.o_proj",
+       "model.layers.42.block_sparse_moe.gate",
+       "model.layers.43.self_attn.q_proj",
+       "model.layers.43.self_attn.k_proj",
+       "model.layers.43.self_attn.v_proj",
+       "model.layers.43.self_attn.o_proj",
+       "model.layers.43.block_sparse_moe.gate",
+       "model.layers.44.self_attn.q_proj",
+       "model.layers.44.self_attn.k_proj",
+       "model.layers.44.self_attn.v_proj",
+       "model.layers.44.self_attn.o_proj",
+       "model.layers.44.block_sparse_moe.gate",
+       "model.layers.45.self_attn.q_proj",
+       "model.layers.45.self_attn.k_proj",
+       "model.layers.45.self_attn.v_proj",
+       "model.layers.45.self_attn.o_proj",
+       "model.layers.45.block_sparse_moe.gate",
+       "model.layers.46.self_attn.q_proj",
+       "model.layers.46.self_attn.k_proj",
+       "model.layers.46.self_attn.v_proj",
+       "model.layers.46.self_attn.o_proj",
+       "model.layers.46.block_sparse_moe.gate",
+       "model.layers.47.self_attn.q_proj",
+       "model.layers.47.self_attn.k_proj",
+       "model.layers.47.self_attn.v_proj",
+       "model.layers.47.self_attn.o_proj",
+       "model.layers.47.block_sparse_moe.gate",
+       "model.layers.48.self_attn.q_proj",
+       "model.layers.48.self_attn.k_proj",
+       "model.layers.48.self_attn.v_proj",
+       "model.layers.48.self_attn.o_proj",
+       "model.layers.48.block_sparse_moe.gate",
+       "model.layers.49.self_attn.q_proj",
+       "model.layers.49.self_attn.k_proj",
+       "model.layers.49.self_attn.v_proj",
+       "model.layers.49.self_attn.o_proj",
+       "model.layers.49.block_sparse_moe.gate",
+       "model.layers.50.self_attn.q_proj",
+       "model.layers.50.self_attn.k_proj",
+       "model.layers.50.self_attn.v_proj",
+       "model.layers.50.self_attn.o_proj",
+       "model.layers.50.block_sparse_moe.gate",
+       "model.layers.51.self_attn.q_proj",
+       "model.layers.51.self_attn.k_proj",
+       "model.layers.51.self_attn.v_proj",
+       "model.layers.51.self_attn.o_proj",
+       "model.layers.51.block_sparse_moe.gate",
+       "model.layers.52.self_attn.q_proj",
+       "model.layers.52.self_attn.k_proj",
+       "model.layers.52.self_attn.v_proj",
+       "model.layers.52.self_attn.o_proj",
+       "model.layers.52.block_sparse_moe.gate",
+       "model.layers.53.self_attn.q_proj",
+       "model.layers.53.self_attn.k_proj",
+       "model.layers.53.self_attn.v_proj",
+       "model.layers.53.self_attn.o_proj",
+       "model.layers.53.block_sparse_moe.gate",
+       "model.layers.54.self_attn.q_proj",
+       "model.layers.54.self_attn.k_proj",
+       "model.layers.54.self_attn.v_proj",
+       "model.layers.54.self_attn.o_proj",
+       "model.layers.54.block_sparse_moe.gate",
+       "model.layers.55.self_attn.q_proj",
+       "model.layers.55.self_attn.k_proj",
+       "model.layers.55.self_attn.v_proj",
+       "model.layers.55.self_attn.o_proj",
+       "model.layers.55.block_sparse_moe.gate",
+       "model.layers.56.self_attn.q_proj",
+       "model.layers.56.self_attn.k_proj",
+       "model.layers.56.self_attn.v_proj",
+       "model.layers.56.self_attn.o_proj",
+       "model.layers.56.block_sparse_moe.gate",
+       "model.layers.57.self_attn.q_proj",
+       "model.layers.57.self_attn.k_proj",
+       "model.layers.57.self_attn.v_proj",
+       "model.layers.57.self_attn.o_proj",
+       "model.layers.57.block_sparse_moe.gate",
+       "model.layers.58.self_attn.q_proj",
+       "model.layers.58.self_attn.k_proj",
+       "model.layers.58.self_attn.v_proj",
+       "model.layers.58.self_attn.o_proj",
+       "model.layers.58.block_sparse_moe.gate",
+       "model.layers.59.self_attn.q_proj",
+       "model.layers.59.self_attn.k_proj",
+       "model.layers.59.self_attn.v_proj",
+       "model.layers.59.self_attn.o_proj",
+       "model.layers.59.block_sparse_moe.gate",
+       "model.layers.60.self_attn.q_proj",
+       "model.layers.60.self_attn.k_proj",
+       "model.layers.60.self_attn.v_proj",
+       "model.layers.60.self_attn.o_proj",
+       "model.layers.60.block_sparse_moe.gate",
+       "model.layers.61.self_attn.q_proj",
+       "model.layers.61.self_attn.k_proj",
+       "model.layers.61.self_attn.v_proj",
+       "model.layers.61.self_attn.o_proj",
+       "model.layers.61.block_sparse_moe.gate",
+       "lm_head"
+     ],
+     "export": {
+       "kv_cache_group": [],
+       "min_kv_scale": 0.0,
+       "pack_method": "reorder",
+       "weight_format": "real_quantized",
+       "weight_merge_groups": null
+     },
+     "global_quant_config": {
+       "bias": null,
+       "input_tensors": {
+         "block_size": null,
+         "ch_axis": -1,
+         "dtype": "fp4",
+         "enable_buffer_reuse": false,
+         "group_size": 32,
+         "is_dynamic": true,
+         "is_scale_quant": false,
+         "max_input_numel": 4194304,
+         "mx_element_dtype": null,
+         "observer_cls": "PerBlockMXObserver",
+         "qscheme": "per_group",
+         "round_method": "half_even",
+         "scale_calculation_mode": "even",
+         "scale_format": "e8m0",
+         "scale_type": "float",
+         "symmetric": null
+       },
+       "output_tensors": null,
+       "target_device": null,
+       "weight": {
+         "block_size": null,
+         "ch_axis": -1,
+         "dtype": "fp4",
+         "enable_buffer_reuse": false,
+         "group_size": 32,
+         "is_dynamic": false,
+         "is_scale_quant": false,
+         "max_input_numel": 4194304,
+         "mx_element_dtype": null,
+         "observer_cls": "PerBlockMXObserver",
+         "qscheme": "per_group",
+         "round_method": "half_even",
+         "scale_calculation_mode": "even",
+         "scale_format": "e8m0",
+         "scale_type": "float",
+         "symmetric": null
+       }
+     },
+     "kv_cache_post_rope": false,
+     "kv_cache_quant_config": {},
+     "layer_quant_config": {},
+     "layer_type_quant_config": {},
+     "quant_method": "quark",
+     "quant_mode": "eager_mode",
+     "softmax_quant_spec": null,
+     "version": "0.12+810893dea1"
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 5000000,
+   "rotary_dim": 64,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "scoring_func": "sigmoid",
+   "shared_intermediate_size": 0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.1",
+   "use_cache": true,
+   "use_mtp": true,
+   "use_qk_norm": true,
+   "use_routing_bias": true,
+   "vocab_size": 200064
+ }
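
The `quantization_config` block above describes a Quark MXFP4 export: fp4 weights and dynamically quantized fp4 activations in groups of 32 with e8m0 scales, while every attention projection, every MoE router gate, and `lm_head` stay unquantized. A minimal sketch for inspecting those fields, assuming the config loads with `trust_remote_code` (repo id is a placeholder):

```python
# Minimal sketch: load config.json above and spot-check architecture and
# quantization fields (repo id is a placeholder).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("path/to/this-repo", trust_remote_code=True)

print(cfg.model_type)                                  # minimax_m2
print(cfg.num_hidden_layers, cfg.head_dim)             # 62 layers, head_dim 128
print(cfg.num_local_experts, cfg.num_experts_per_tok)  # 256 experts, top-8 routing
# quantization_config is kept as a plain dict on the config object here.
print(cfg.quantization_config["quant_method"])                            # quark
print(cfg.quantization_config["global_quant_config"]["weight"]["dtype"])  # fp4
```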
configuration_minimax_m2.py ADDED
@@ -0,0 +1,200 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class MiniMaxM2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate a
+     MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the MiniMaxM2-7B-v0.1 or MiniMaxM2-7B-Instruct-v0.1.
+
+     [minimax_m2ai/MiniMaxM2-8x7B](https://huggingface.co/minimax_m2ai/MiniMaxM2-8x7B)
+     [minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1](https://huggingface.co/minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1)
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`MiniMaxM2Model`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 14336):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+         head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
+             The attention head dimension.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+             The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
+             allows sequences of up to 4096*32 tokens.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             The id of the padding token.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the "end-of-sequence" token.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 1000000.0):
+             The base period of the RoPE embeddings.
+         sliding_window (`int`, *optional*):
+             Sliding window attention window size. If not specified, will default to `4096`.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         num_experts_per_tok (`int`, *optional*, defaults to 2):
+             The number of experts to route per-token, can also be interpreted as the `top-k` routing
+             parameter
+         num_local_experts (`int`, *optional*, defaults to 8):
+             Number of experts per Sparse MLP layer.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss. See [here]() for more details
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+         router_jitter_noise (`float`, *optional*, defaults to 0.0):
+             Amount of noise to add to the router.
+
+     ```python
+     >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
+
+     >>> # Initializing a MiniMaxM2 7B style configuration
+     >>> configuration = MiniMaxM2Config()
+
+     >>> # Initializing a model from the MiniMaxM2 7B style configuration
+     >>> model = MiniMaxM2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "minimax_m2"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.block_sparse_moe.gate": "colwise_rep",  # we need to replicate here to correctly route experts
+         "layers.*.block_sparse_moe.experts.*.w1": "colwise",
+         "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
+         "layers.*.block_sparse_moe.experts.*.w3": "colwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=14336,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=8,
+         head_dim=None,
+         hidden_act="silu",
+         max_position_embeddings=4096 * 32,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         rope_theta=1e6,
+         sliding_window=None,
+         attention_dropout=0.0,
+         num_experts_per_tok=2,
+         num_local_experts=8,
+         output_router_logits=False,
+         router_aux_loss_coef=0.001,
+         router_jitter_noise=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.sliding_window = sliding_window
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+         self.head_dim = head_dim
+
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_local_experts = num_local_experts
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.router_jitter_noise = router_jitter_noise
+
+         self.use_qk_norm = kwargs.pop("use_qk_norm", False)
+         self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
+         self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
+         if self.head_dim is not None:
+             self.partial_rotary_factor = self.rotary_dim / self.head_dim
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["MiniMaxM2Config"]
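
One detail worth tracing in `__init__` above: when `head_dim` is set, `partial_rotary_factor` is derived as `rotary_dim / head_dim` rather than taken from kwargs. A worked example with the values this checkpoint ships in config.json:

```python
# Worked example of the partial-rotary bookkeeping in __init__ above, using
# the values this checkpoint ships in config.json.
head_dim = 128    # config.json "head_dim"
rotary_dim = 64   # config.json "rotary_dim"

# Since head_dim is not None, __init__ overrides partial_rotary_factor:
partial_rotary_factor = rotary_dim / head_dim
assert partial_rotary_factor == 0.5  # matches config.json "partial_rotary_factor"

# apply_rotary_pos_emb in modeling_minimax_m2.py then rotates only the first
# rotary_dim channels of each head and passes the remaining 64 through unchanged.
```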
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "bos_token_id": 200019,
+   "do_sample": true,
+   "eos_token_id": 200020,
+   "top_k": 40,
+   "top_p": 0.95,
+   "transformers_version": "4.57.1"
+ }
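
Note that these defaults reuse the chat markers from added_tokens.json: 200019 is `]~b]` (turn start, serving as BOS) and 200020 is `[e~[` (turn end, serving as EOS). A minimal sampling sketch under these defaults; the repo id is a placeholder, and kernel support for the MXFP4 checkpoint is assumed:

```python
# Minimal sketch: sampled generation under the defaults above
# (do_sample=True, top_k=40, top_p=0.95). The repo id is a placeholder and
# MXFP4-capable kernels are assumed to be available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this-repo"
tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, trust_remote_code=True, dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Briefly explain top-k routing in MoE models."}]
input_ids = tok.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Generation stops at eos_token_id 200020, the '[e~[' end-of-turn marker.
output = model.generate(input_ids, max_new_tokens=256)
print(tok.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=False))
```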
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75a9ef89d7a44e6e3216d80500a15aed38f072d8302727b1e6c828a3cec2deae
+ size 4998589064
model-00002-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:963a7a4350cc1af3124084c66e7096af1eaccbf3936a8390ff166e561c387579
+ size 4999780104
model-00003-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36a69df9ae619ac244aa3913da9fa81e139acc5d77bef16fc916d777ac736b7
+ size 5000351440
model-00004-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7cfdfd5baa90b230a3b6807a8205a2052f9251f11dbaa12683a20ebb40c926f
+ size 4999780160
model-00005-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d618546458737a233c2adab07198781e9059375aee1f6e94e01ea62a55eee0a9
+ size 5000354152
model-00006-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4205aed4bcd45ded1274b6783087187bb1362c0fc4d2ba517d9c5810c019f4f0
+ size 4999784024
model-00007-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0290ed63f15cb69e23f51bf3ff734b8736bc47b69051765541372c6cca57489
+ size 5000355184
model-00008-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f7618160636088d0b1a2b71b413b2055fdc1b1cc6e8022b12b00b8255736f97
+ size 4999784088
model-00009-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b9f6f73d30b2d4fcc8d638d80f0011c6deaed84a68f9074b7ef31fba0d7f37b
+ size 5000355120
model-00010-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbe872e3db3a12e1f2a7e384dc32ea79e4a05b3f5c633a14ce9ab8814f87b559
+ size 4999784144
model-00011-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b8799395e17c4e8547134b117ef409e803ff56d66c87bfb805e964b97861a59
+ size 5000355064
model-00012-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:030952da704e667a5427f8e26f10e1d2daa80dea85bbf9fe722dcb5f497ef1de
+ size 4999784208
model-00013-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ecbd686eb528c65490f3bd3cd78c5139961ec50f1ff6dd97deb4955201a6673
+ size 5000355000
model-00014-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dc42719f1ee2b340aa03a476a222debf4e3e83f4231e3af1235e61265713994
+ size 4999784264
model-00015-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e77c297a5fd803afbfcfbede8ab923e1bf99de1d8f8ceb5528f9495ba34384d
+ size 5000354944
model-00016-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:961a980c385150fb13f5b8070a77d4a06c700d8dbdda3a5aa48868caad4b9b67
+ size 4999784328
model-00017-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94243cdd2c2aea91f45c6ab6d8272c23b2771ffd4310db85493056c35529d1c1
+ size 5000354880
model-00018-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:936c8ed0a732725ceed582b76adff60881d9c74b4b3f8c5ad52fc192397de17e
+ size 4999784432
model-00019-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:119e87d60e68ac8af3efd34f2173a693ad35486d96878fa2665770419bf2aee6
+ size 5000354776
model-00020-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37d48eb5ddab040e18c38387fb785f52fae00ee2eb52dbe217857383ca3c2d06
+ size 4967776696
model-00021-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1b8aa2f88570146428b7a39dcba5c26580bf3f589dace29626a4d7897e4c854
+ size 4999771296
model-00022-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8948d43f158ed5c0444dc619c205cc0dcaf8f3c2bb0f78a6cd776c7e87d11ae
+ size 5000355408
model-00023-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55b9b91ff87f9b158135cce930df49dea8951fa31a46ede9dc667447ce6d6e1b
+ size 4999783800
model-00024-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:130b9fe31507d1d052d28bd76f3b4b9a314875c3ddf2a4d943bc1d403b995863
+ size 5000355408
model-00025-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ff904e227595bdd758aedc40913d8e9f9810362030987f7f16281486c0ff656
+ size 4999783816
model-00026-of-00026.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6eca0175f4a0b91f57af7a7557c8ffb11fa8a76111a0418b71654abbe878d48
+ size 2425059512
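
The 26 shards total roughly 127 GB (25 shards of about 5 GB plus one of 2.4 GB). A small sketch for sanity-checking that figure from the LFS pointer files, assuming the repo was cloned with `GIT_LFS_SKIP_SMUDGE=1` so each `*.safetensors` file still holds pointer text:

```python
# Minimal sketch: sum the "size" fields of the LFS pointers above to get the
# expected weight payload (~127 GB). Assumes pointer files, not real weights,
# are checked out.
from pathlib import Path

total = 0
for pointer in sorted(Path(".").glob("model-*-of-00026.safetensors")):
    for line in pointer.read_text().splitlines():
        if line.startswith("size "):
            total += int(line.split()[1])

print(f"{total / 1e9:.1f} GB across 26 shards")
```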
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_minimax_m2.py ADDED
@@ -0,0 +1,726 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from collections.abc import Callable
24
+ from typing import Optional, Union, Unpack
25
+
26
+ import torch
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.generation import GenerationMixin
32
+ try:
33
+ from transformers.integrations import use_kernel_forward_from_hub
34
+ except ImportError:
35
+ # Fallback for older Transformers versions without this integration hook.
36
+ def use_kernel_forward_from_hub(*args, **kwargs):
37
+ def _decorator(obj):
38
+ return obj
39
+
40
+ return _decorator
41
+
42
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
43
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
44
+ from transformers.modeling_layers import (
45
+ GenericForQuestionAnswering,
46
+ GenericForSequenceClassification,
47
+ GenericForTokenClassification,
48
+ GradientCheckpointingLayer,
49
+ )
50
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
51
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
52
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
53
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
54
+ from transformers.utils.deprecation import deprecate_kwarg
55
+ try:
56
+ from transformers.utils.output_capturing import OutputRecorder
57
+ except ImportError:
58
+ from transformers.utils.generic import OutputRecorder
59
+
60
+ try:
61
+ from transformers.utils.generic import check_model_inputs
62
+ except ImportError:
63
+ # Fallback decorator when input-check helper is unavailable.
64
+ def check_model_inputs(func):
65
+ return func
66
+
67
+ from .configuration_minimax_m2 import MiniMaxM2Config
68
+
69
+
70
+ class MiniMaxM2MLP(nn.Module):
71
+ def __init__(self, config: MiniMaxM2Config):
72
+ super().__init__()
73
+ self.ffn_dim = config.intermediate_size
74
+ self.hidden_dim = config.hidden_size
75
+
76
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
77
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
78
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
79
+
80
+ self.act_fn = ACT2FN[config.hidden_act]
81
+
82
+ def forward(self, hidden_states):
83
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
84
+ current_hidden_states = self.w2(current_hidden_states)
85
+ return current_hidden_states
86
+
87
+
88
+ class MiniMaxM2Experts(nn.ModuleList):
89
+ """
90
+ ModuleList of experts.
91
+ """
92
+
93
+ def __init__(self, config: MiniMaxM2Config):
94
+ super().__init__()
95
+ self.top_k = config.num_experts_per_tok
96
+ self.num_experts = config.num_local_experts
97
+ for _ in range(self.num_experts):
98
+ self.append(MiniMaxM2MLP(config))
99
+
100
+ def forward(
101
+ self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
102
+ ) -> torch.Tensor:
103
+ """
104
+ Args:
105
+ hidden_states: (batch_size * sequence_length, hidden_dim)
106
+ selected_experts: (batch_size * sequence_length, top_k)
107
+ routing_weights: (batch_size * sequence_length, top_k)
108
+ Returns:
109
+ (batch_size * sequence_length, hidden_dim)
110
+ """
111
+ final_hidden_states = torch.zeros_like(hidden_states)
112
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
113
+
114
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
115
+ for expert_idx in expert_hit:
116
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
117
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
118
+ current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
119
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
120
+ return final_hidden_states
121
+
122
+
123
+ class MiniMaxM2SparseMoeBlock(nn.Module):
124
+ def __init__(self, config):
125
+ super().__init__()
126
+ self.top_k = config.num_experts_per_tok
127
+ self.jitter_noise = config.router_jitter_noise
128
+ self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
129
+ self.experts = MiniMaxM2Experts(config)
130
+ self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
131
+
132
+ def route_tokens_to_experts(self, router_logits):
133
+ routing_weights = torch.nn.functional.sigmoid(router_logits.float())
134
+ scores_for_choice = routing_weights + self.e_score_correction_bias
135
+ _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
136
+ top_k_weights = routing_weights.gather(1, top_k_index)
137
+ top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
138
+ return top_k_index, top_k_weights.to(router_logits.dtype)
139
+
140
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
141
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
142
+ if self.training and self.jitter_noise > 0:
143
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
144
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
145
+ router_logits = self.gate(hidden_states)
146
+ top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
147
+ hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
148
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
149
+ return hidden_states, router_logits
150
+
151
+
152
+ @use_kernel_forward_from_hub("RMSNorm")
153
+ class MiniMaxM2RMSNorm(nn.Module):
154
+ def __init__(self, hidden_size, eps=1e-6):
155
+ """
156
+ MiniMaxM2RMSNorm is equivalent to T5LayerNorm
157
+ """
158
+ super().__init__()
159
+ self.weight = nn.Parameter(torch.ones(hidden_size))
160
+ self.variance_epsilon = eps
161
+
162
+ def forward(self, hidden_states):
163
+ input_dtype = hidden_states.dtype
164
+ hidden_states = hidden_states.to(torch.float32)
165
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
166
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
167
+ return self.weight * hidden_states.to(input_dtype)
168
+
169
+ def extra_repr(self):
170
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
171
+
172
+
173
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
174
+ """
175
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
176
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
177
+ """
178
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
179
+ if n_rep == 1:
180
+ return hidden_states
181
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
182
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
183
+
184
+
185
+ def eager_attention_forward(
186
+ module: nn.Module,
187
+ query: torch.Tensor,
188
+ key: torch.Tensor,
189
+ value: torch.Tensor,
190
+ attention_mask: Optional[torch.Tensor],
191
+ scaling: float,
192
+ dropout: float = 0.0,
193
+ **kwargs: Unpack[TransformersKwargs],
194
+ ):
195
+ key_states = repeat_kv(key, module.num_key_value_groups)
196
+ value_states = repeat_kv(value, module.num_key_value_groups)
197
+
198
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
199
+ if attention_mask is not None:
200
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
201
+ attn_weights = attn_weights + causal_mask
202
+
203
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
204
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
205
+ attn_output = torch.matmul(attn_weights, value_states)
206
+ attn_output = attn_output.transpose(1, 2).contiguous()
207
+
208
+ return attn_output, attn_weights
209
+
210
+
211
+ def rotate_half(x):
212
+ """Rotates half the hidden dims of the input."""
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+
218
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
219
+ """Applies Rotary Position Embedding to the query and key tensors.
220
+
221
+ Args:
222
+ q (`torch.Tensor`): The query tensor.
223
+ k (`torch.Tensor`): The key tensor.
224
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
225
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
226
+ position_ids (`torch.Tensor`, *optional*):
227
+ Deprecated and unused.
228
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
229
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
230
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
231
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
232
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
233
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
234
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
235
+ Returns:
236
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
237
+ """
238
+ cos = cos.unsqueeze(unsqueeze_dim)
239
+ sin = sin.unsqueeze(unsqueeze_dim)
240
+
241
+ # Keep half or full tensor for later concatenation
242
+ rotary_dim = cos.shape[-1]
243
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
244
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
245
+
246
+ # Apply rotary embeddings on the first half or full tensor
247
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
248
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
249
+
250
+ # Concatenate back to full shape
251
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
252
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
253
+ return q_embed, k_embed
254
+
255
+
256
+ class MiniMaxM2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.is_causal = True
+         self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
+         self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+         self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+         self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+
+         self.use_qk_norm = config.use_qk_norm
+         if self.use_qk_norm:
+             self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
+             self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_values: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         if self.use_qk_norm:  # main diff from Llama
+             query_states = self.q_norm(query_states)
+             key_states = self.k_norm(key_states)
+
+         key_states = key_states.view(hidden_shape)
+         query_states = query_states.view(hidden_shape)
+         value_states = value_states.view(hidden_shape)
+
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; cache_position is needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
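The attention module above is grouped-query attention: `num_attention_heads` query heads share `num_key_value_heads` K/V heads, and `num_key_value_groups` query heads map onto each K/V head. A standalone sketch of how the K/V heads are expanded before the score matmul (not part of the committed file; this mirrors the standard transformers `repeat_kv` helper that `eager_attention_forward` relies on, with made-up sizes):

import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq_len, head_dim) -> (batch, num_kv_heads * n_rep, seq_len, head_dim)
    batch, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, slen, head_dim)

num_attention_heads, num_key_value_heads = 8, 2     # 4 query heads share each KV head
k = torch.randn(1, num_key_value_heads, 10, 64)
k_expanded = repeat_kv(k, num_attention_heads // num_key_value_heads)
assert k_expanded.shape == (1, num_attention_heads, 10, 64)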
+ class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.self_attn = MiniMaxM2Attention(config, layer_idx)
+
+         self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
+         self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> torch.FloatTensor:
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, _ = self.self_attn(
+             hidden_states=hidden_states,
+             position_embeddings=position_embeddings,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             cache_position=cache_position,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states, _ = self.block_sparse_moe(hidden_states)
+         hidden_states = residual + hidden_states
+
+         return hidden_states
+
+
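The layer follows the usual pre-norm residual wiring, with the MLP replaced by a sparse-MoE block. Schematically (an illustrative abstraction, not part of the committed file, with the sub-blocks treated as plain callables):

def decoder_layer(x, attn, moe, input_norm, post_attn_norm):
    x = x + attn(input_norm(x))        # normalize, attend, add residual
    x = x + moe(post_attn_norm(x))     # normalize, route through sparse MoE, add residual
    return x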
+ class MiniMaxM2RotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: MiniMaxM2Config, device=None):
+         super().__init__()
+         # BC: "rope_type" was originally "type"
+         if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
+             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ @auto_docstring
+ class MiniMaxM2PreTrainedModel(PreTrainedModel):
+     config: MiniMaxM2Config
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["MiniMaxM2DecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
+     _supports_attention_backend = True
+     _can_record_outputs = {
+         "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
+         "hidden_states": MiniMaxM2DecoderLayer,
+         "attentions": MiniMaxM2Attention,
+     }
+
+
+ @auto_docstring
+ class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
+     def __init__(self, config: MiniMaxM2Config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @check_model_inputs
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> MoeModelOutputWithPast:
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache(config=self.config)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
+         causal_mask = mask_function(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=position_ids,
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             hidden_states = decoder_layer(
+                 hidden_states,
+                 position_embeddings=position_embeddings,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 **kwargs,
+             )
+
+         hidden_states = self.norm(hidden_states)
+
+         return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+
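When `cache_position` is not supplied, the model derives token positions from the cache length, as in the forward pass above. A tiny sketch of that bookkeeping during incremental decoding (illustrative values, not part of the committed file):

import torch

past_seen_tokens, new_tokens = 7, 1                 # one decode step after 7 cached tokens
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
position_ids = cache_position.unsqueeze(0)          # shape (1, new_tokens)
assert cache_position.tolist() == [7] and position_ids.shape == (1, 1)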
+ def load_balancing_loss_func(
+     gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
+     num_experts: Optional[int] = None,
+     top_k=2,
+     attention_mask: Optional[torch.Tensor] = None,
+ ) -> Union[torch.Tensor, int]:
+     r"""
+     Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
+
+     See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
+     function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+     experts is too unbalanced.
+
+     Args:
+         gate_logits:
+             Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+             shape [batch_size X sequence_length, num_experts].
+         num_experts:
+             Number of experts.
+         top_k:
+             The number of experts to route per token; can also be interpreted as the `top-k` routing
+             parameter.
+         attention_mask (`torch.Tensor`, *optional*):
+             The attention_mask used in the forward function,
+             shape [batch_size X sequence_length] if not None.
+
+     Returns:
+         The auxiliary loss.
+     """
+     if gate_logits is None or not isinstance(gate_logits, tuple):
+         return 0
+
+     if isinstance(gate_logits, tuple):
+         compute_device = gate_logits[0].device
+         concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+     routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+     _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+     expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+     if attention_mask is None:
+         # Compute the percentage of tokens routed to each expert
+         tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.mean(routing_weights, dim=0)
+     else:
+         batch_size, sequence_length = attention_mask.shape
+         num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+         # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
+         expert_attention_mask = (
+             attention_mask[None, :, :, None, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+             .reshape(-1, top_k, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the percentage of tokens routed to each expert
+         tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+             expert_attention_mask, dim=0
+         )
+
+         # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
+         router_per_expert_attention_mask = (
+             attention_mask[None, :, :, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+             .reshape(-1, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+             router_per_expert_attention_mask, dim=0
+         )
+
+     overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+     return overall_loss * num_experts
+
+
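A quick numeric sanity check of the loss above: with uniform router probabilities P_i = 1/E, the loss is E · Σ f_i · P_i = Σ f_i = top_k, since each of the top_k one-hot slots sums to 1 across experts. A minimal sketch (not part of the committed file; it assumes `load_balancing_loss_func` as defined above is in scope, and the sizes are arbitrary):

import torch

E, top_k, tokens = 4, 2, 1000
gate_logits = (torch.zeros(tokens, E),)             # one layer, perfectly uniform router
loss = load_balancing_loss_func(gate_logits, num_experts=E, top_k=top_k)
assert torch.isclose(loss, torch.tensor(2.0))       # uniform probabilities give the minimum, top_k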
+ @auto_docstring
+ class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = MiniMaxM2Model(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.router_aux_loss_coef = config.router_aux_loss_coef
+         self.num_experts = config.num_local_experts
+         self.num_experts_per_tok = config.num_experts_per_tok
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_router_logits: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> MoeCausalLMOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
+
+         >>> model = MiniMaxM2ForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
+         >>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_router_logits = (
+             output_router_logits if output_router_logits is not None else self.config.output_router_logits
+         )
+
+         # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs: MoeModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_router_logits=output_router_logits,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+         aux_loss = None
+         if output_router_logits:
+             aux_loss = load_balancing_loss_func(
+                 outputs.router_logits,
+                 self.num_experts,
+                 self.num_experts_per_tok,
+                 attention_mask,
+             )
+             if labels is not None:
+                 loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure both reside on the same device
+
+         return MoeCausalLMOutputWithPast(
+             loss=loss,
+             aux_loss=aux_loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+             router_logits=outputs.router_logits,
+         )
+
+
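The `logits_to_keep` argument above trims the LM-head projection to the positions that are actually needed; `0` keeps every position, while `1` keeps only the last one (the common case during generation). A small standalone sketch of the slicing (illustrative shapes, not part of the committed file):

import torch

hidden = torch.randn(1, 10, 16)                     # (batch, seq_len, hidden)
for logits_to_keep in (0, 1):                       # 0 -> all positions, 1 -> last position only
    sl = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    kept = hidden[:, sl, :]
    print(logits_to_keep, tuple(kept.shape))        # 0 -> (1, 10, 16); 1 -> (1, 1, 16)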
+ class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ __all__ = [
+     "MiniMaxM2ForCausalLM",
+     "MiniMaxM2ForQuestionAnswering",
+     "MiniMaxM2Model",
+     "MiniMaxM2PreTrainedModel",
+     "MiniMaxM2ForSequenceClassification",
+     "MiniMaxM2ForTokenClassification",
+ ]
quark_profile.yaml ADDED
@@ -0,0 +1,163 @@
+ # Quark Profiling Results
+
+ memory_usage:
+   - step: "Start"
+     timestamp: 1776943970.2197673
+     relative_time_secs: 0.0
+     cpu_memory_mb: 3155.46
+     gpu_memory_mb: 252626.48
+     disk_read_mb: 0.0
+     disk_write_mb: 0.0
+   - step: "Model Loading Start"
+     timestamp: 1776943970.4032104
+     relative_time_secs: 0.1834430694580078
+     cpu_memory_mb: 3155.46
+     gpu_memory_mb: 252626.48
+     disk_read_mb: 0.0
+     disk_write_mb: 0.0
+   - step: "Model Loading End"
+     timestamp: 1776944102.8076537
+     relative_time_secs: 132.58788633346558
+     cpu_memory_mb: 4303.91
+     gpu_memory_mb: 252626.72
+     disk_read_mb: 0.0
+     disk_write_mb: 0.0
+   - step: "Dataset Loading Start"
+     timestamp: 1776944118.0519834
+     relative_time_secs: 147.8322160243988
+     cpu_memory_mb: 14575.46
+     gpu_memory_mb: 252627.2
+     disk_read_mb: 2.58
+     disk_write_mb: 5616.16
+   - step: "Dataset Loading End"
+     timestamp: 1776944131.2534077
+     relative_time_secs: 161.03364038467407
+     cpu_memory_mb: 14849.92
+     gpu_memory_mb: 252627.24
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Model Quantization Start"
+     timestamp: 1776944131.4114296
+     relative_time_secs: 161.19166231155396
+     cpu_memory_mb: 14849.92
+     gpu_memory_mb: 252627.24
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Model Preparation Start"
+     timestamp: 1776944131.608502
+     relative_time_secs: 161.3887345790863
+     cpu_memory_mb: 14849.92
+     gpu_memory_mb: 252627.24
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Model Preparation End"
+     timestamp: 1776944144.8085756
+     relative_time_secs: 174.58880829811096
+     cpu_memory_mb: 16457.26
+     gpu_memory_mb: 252627.29
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Advanced Algorithms Start"
+     timestamp: 1776944145.0606568
+     relative_time_secs: 174.84088945388794
+     cpu_memory_mb: 16457.26
+     gpu_memory_mb: 252627.29
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Advanced Algorithms End"
+     timestamp: 1776944145.183013
+     relative_time_secs: 174.96324563026428
+     cpu_memory_mb: 16457.26
+     gpu_memory_mb: 252627.29
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Calibration Start"
+     timestamp: 1776944145.346003
+     relative_time_secs: 175.12623572349548
+     cpu_memory_mb: 16457.26
+     gpu_memory_mb: 252627.29
+     disk_read_mb: 2.6
+     disk_write_mb: 7351.78
+   - step: "Calibration End"
+     timestamp: 1776944198.1818697
+     relative_time_secs: 227.9621024131775
+     cpu_memory_mb: 16832.04
+     gpu_memory_mb: 252627.35
+     disk_read_mb: 8.91
+     disk_write_mb: 7425.28
+   - step: "Model Quantization End"
+     timestamp: 1776944202.6519358
+     relative_time_secs: 232.43216848373413
+     cpu_memory_mb: 16831.62
+     gpu_memory_mb: 252627.35
+     disk_read_mb: 8.91
+     disk_write_mb: 7425.28
+   - step: "Freeze Model Start"
+     timestamp: 1776944202.8819818
+     relative_time_secs: 232.66221451759338
+     cpu_memory_mb: 16831.62
+     gpu_memory_mb: 252627.35
+     disk_read_mb: 8.91
+     disk_write_mb: 7425.28
+   - step: "Freeze Model End"
+     timestamp: 1776944215.6099172
+     relative_time_secs: 245.39014983177185
+     cpu_memory_mb: 36070.09
+     gpu_memory_mb: 252627.44
+     disk_read_mb: 9.73
+     disk_write_mb: 7435.34
+   - step: "Export HF Safetensors Start"
+     timestamp: 1776944215.7945082
+     relative_time_secs: 245.57474088668823
+     cpu_memory_mb: 36070.09
+     gpu_memory_mb: 252627.44
+     disk_read_mb: 9.73
+     disk_write_mb: 7435.34
+   - step: "Export HF Safetensors End"
+     timestamp: 1776944312.2179017
+     relative_time_secs: 341.99813437461853
+     cpu_memory_mb: 26283.43
+     gpu_memory_mb: 252627.45
+     disk_read_mb: 9.79
+     disk_write_mb: 128965.3
+   - step: "End"
+     timestamp: 1776944312.4254723
+     relative_time_secs: 342.20570492744446
+     cpu_memory_mb: 26281.42
+     gpu_memory_mb: 252627.45
+     disk_read_mb: 9.79
+     disk_write_mb: 128965.3
+
+ # Summary Metrics
+ total_quantization_time_seconds: 342.2058
+ peak_memory_mb: 25348.9
+ peak_gpu_memory_mb: 252627.45
+ total_disk_read_mb: 9.79
+ total_disk_write_mb: 128965.3
+
+ # Metric Definitions:
+ #
+ # Checkpoint Metrics (per record):
+ #   - step: Name of the profiling checkpoint. Common steps include:
+ #     - "Start": Initial state when profiling begins
+ #     - "Model Loaded": After loading the ONNX model into memory
+ #     - "Pre-process Start/End": Before and after model preprocessing
+ #     - "Calibration Start/End": Before and after calibration data collection
+ #     - "Quantization (MatMulNBits) Start/End": MatMulNBits quantization phase
+ #     - "Quantization (Static) Start/End": Static quantization phase
+ #     - "Post-process Start/End": Before and after post-processing
+ #     - "Fast Finetune Start/End": Before and after fast finetuning (if enabled)
+ #   - timestamp: Unix timestamp (seconds since epoch) when this measurement was taken. Useful for correlating with external logs or events.
+ #   - relative_time_secs: Time elapsed (in seconds) since the "Start" step. Useful for understanding the duration of each phase relative to the beginning of profiling.
+ #   - cpu_memory_mb: Current Resident Set Size (RSS) in megabytes at this step. This includes memory from the main process and all child processes. RSS represents the portion of memory held in RAM (not swapped out).
+ #   - gpu_memory_mb: Current GPU memory usage in megabytes. This represents actual GPU memory used by the process, including allocations from PyTorch, ONNX Runtime, TensorRT, and other frameworks. Only available when PyTorch with CUDA/ROCm is installed and a GPU is available.
+ #   - disk_read_mb: Cumulative disk bytes read (in megabytes) since the start of profiling. Measured relative to the baseline captured at the 'Start' checkpoint, including I/O from the main process and all child processes. Only available when psutil is installed and the OS exposes per-process I/O counters (Linux /proc/<pid>/io, Windows; not available on macOS without root).
+ #   - disk_write_mb: Cumulative disk bytes written (in megabytes) since the start of profiling. Measured relative to the baseline captured at the 'Start' checkpoint, including I/O from the main process and all child processes. Only available when psutil is installed and the OS exposes per-process I/O counters (Linux /proc/<pid>/io, Windows; not available on macOS without root).
+ #
+ # Summary Metrics (overall):
+ #   - total_quantization_time_seconds: Total elapsed time (in seconds) from the start of profiling to the end of the quantization process.
+ #   - peak_memory_mb: Peak resident set size (RSS) in megabytes for the main process during the entire profiling session. On Linux, this is read from VmHWM (high water mark) in /proc/<pid>/status. On Windows, this is the peak working set size. This metric may not be available on all platforms.
+ #   - peak_gpu_memory_mb: Peak GPU memory usage in megabytes during the entire profiling session. This is the maximum GPU memory used, including allocations from PyTorch, ONNX Runtime, TensorRT, and other frameworks. Only available when PyTorch with CUDA/ROCm is installed and a GPU is available.
+ #   - total_disk_read_mb: Total disk bytes read (in megabytes) during the entire profiling session. Computed as the difference between the final and baseline cumulative read counters, including I/O from the main process and all child processes. Only available when psutil is installed and the OS exposes per-process I/O counters (Linux /proc/<pid>/io, Windows; not available on macOS without root).
+ #   - total_disk_write_mb: Total disk bytes written (in megabytes) during the entire profiling session. Computed as the difference between the final and baseline cumulative write counters, including I/O from the main process and all child processes. Only available when psutil is installed and the OS exposes per-process I/O counters (Linux /proc/<pid>/io, Windows; not available on macOS without root).
+ #   - peak_cache_dir_disk_usage_mb: Highest peak increase in disk usage (in megabytes) among all cache directories created during the profiling session, relative to each cache directory's size when monitoring started. Sampled every 1 second by recursively summing file sizes with os.scandir().
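The checkpoint records above can be reduced to per-phase durations by differencing `relative_time_secs` between matching Start/End steps. A small sketch (not part of the committed file; it assumes PyYAML is installed and the file is read from the repo's local directory):

import yaml

with open("quark_profile.yaml") as f:
    profile = yaml.safe_load(f)

# map step name -> seconds since the "Start" checkpoint
steps = {rec["step"]: rec["relative_time_secs"] for rec in profile["memory_usage"]}

calibration_secs = steps["Calibration End"] - steps["Calibration Start"]                  # ~52.8 s
export_secs = steps["Export HF Safetensors End"] - steps["Export HF Safetensors Start"]  # ~96.4 s
print(f"calibration: {calibration_secs:.1f}s, export: {export_secs:.1f}s")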
special_tokens_map.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "additional_special_tokens": [
+     "<code_interpreter>",
+     "<commit_after>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<empty_output>",
+     "<filename>",
+     "<fim_middle>",
+     "<fim_pad>",
+     "<fim_prefix>",
+     "<fim_suffix>",
+     "<function_call>",
+     "<gh_stars>",
+     "]<]speech[>[",
+     "]<]image[>[",
+     "]<]video[>[",
+     "]<]start of speech[>[",
+     "]<]end of speech[>[",
+     "]<]start of image[>[",
+     "]<]end of image[>[",
+     "]<]start of video[>[",
+     "]<]end of video[>[",
+     "]<]vision pad[>[",
+     "]~!b[",
+     "<issue_closed>",
+     "<issue_comment>",
+     "<issue_start>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<reponame>",
+     "[e~[",
+     "]!d~[",
+     "]!p~[",
+     "]~b]",
+     "<jupyter_error>",
+     "<add_file>",
+     "<delete_file>",
+     "<rename_file>",
+     "<edit_file>",
+     "<commit_message>",
+     "<empty_source_file>",
+     "<repo_struct>",
+     "<code_context>",
+     "<file_content>",
+     "<source_files>",
+     "<pr_start>",
+     "<review_comment>",
+     "<filepath>",
+     "<file_sep>"
+   ],
+   "bos_token": {
+     "content": "]~!b[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[e~[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[e~[",
+   "unk_token": {
+     "content": "]!d~[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad01040a70bcef384b88cf9a00f75c1b73106acf4ab506fe04d75a89591111dc
+ size 15523019
tokenizer_config.json ADDED
@@ -0,0 +1,498 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "200000": {
+       "content": "]!p~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200001": {
+       "content": "<fim_prefix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200002": {
+       "content": "<fim_middle>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200003": {
+       "content": "<fim_suffix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200004": {
+       "content": "<fim_pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200005": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200006": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200007": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200008": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200009": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200010": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200011": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200012": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200013": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200014": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200015": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200016": {
+       "content": "<commit_before>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200017": {
+       "content": "<commit_msg>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<commit_after>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200019": {
+       "content": "]~b]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200020": {
+       "content": "[e~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200021": {
+       "content": "]!d~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200022": {
+       "content": "<function_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200023": {
+       "content": "<code_interpreter>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200024": {
+       "content": "]<]speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200025": {
+       "content": "]<]image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200026": {
+       "content": "]<]video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200027": {
+       "content": "]<]start of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200028": {
+       "content": "]<]end of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200029": {
+       "content": "]<]start of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200030": {
+       "content": "]<]end of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200031": {
+       "content": "]<]start of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200032": {
+       "content": "]<]end of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200033": {
+       "content": "]<]vision pad[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200034": {
+       "content": "]~!b[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200035": {
+       "content": "<jupyter_error>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200036": {
+       "content": "<add_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200037": {
+       "content": "<delete_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200038": {
+       "content": "<rename_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200039": {
+       "content": "<edit_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200040": {
+       "content": "<commit_message>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200041": {
+       "content": "<empty_source_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200042": {
+       "content": "<repo_struct>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200043": {
+       "content": "<code_context>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200044": {
+       "content": "<file_content>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200045": {
+       "content": "<source_files>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200046": {
+       "content": "<pr_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200047": {
+       "content": "<review_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200048": {
+       "content": "<filepath>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200049": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200050": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200051": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200052": {
+       "content": "<minimax:tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200053": {
+       "content": "</minimax:tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<code_interpreter>",
+     "<commit_after>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<empty_output>",
+     "<filename>",
+     "<fim_middle>",
+     "<fim_pad>",
+     "<fim_prefix>",
+     "<fim_suffix>",
+     "<function_call>",
+     "<gh_stars>",
+     "]<]speech[>[",
+     "]<]image[>[",
+     "]<]video[>[",
+     "]<]start of speech[>[",
+     "]<]end of speech[>[",
+     "]<]start of image[>[",
+     "]<]end of image[>[",
+     "]<]start of video[>[",
+     "]<]end of video[>[",
+     "]<]vision pad[>[",
+     "]~!b[",
+     "<issue_closed>",
+     "<issue_comment>",
+     "<issue_start>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<reponame>",
+     "[e~[",
+     "]!d~[",
+     "]!p~[",
+     "]~b]",
+     "<jupyter_error>",
+     "<add_file>",
+     "<delete_file>",
+     "<rename_file>",
+     "<edit_file>",
+     "<commit_message>",
+     "<empty_source_file>",
+     "<repo_struct>",
+     "<code_context>",
+     "<file_content>",
+     "<source_files>",
+     "<pr_start>",
+     "<review_comment>",
+     "<filepath>",
+     "<file_sep>"
+   ],
+   "bos_token": "]~!b[",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[e~[",
+   "extra_special_tokens": {},
+   "model_max_length": 40960000,
+   "pad_token": "[e~[",
+   "padding_side": "left",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "]!d~["
+ }
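Together, tokenizer_config.json, special_tokens_map.json, and tokenizer.json define the tokenizer's special-token wiring. A short sketch to verify it after cloning (not part of the committed files; "." stands for this repo's local directory and assumes transformers is installed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
assert tok.bos_token == "]~!b[" and tok.eos_token == "[e~[" == tok.pad_token
assert tok.convert_tokens_to_ids("<think>") == 200050   # matches added_tokens_decoder above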
vocab.json ADDED
The diff for this file is too large to render. See raw diff