Add files using upload-large-folder tool
- .gitattributes +2 -0
- __init__.py +26 -0
- added_tokens.json +56 -0
- chat_template.jinja +159 -0
- config.json +147 -0
- configuration_minimax_m2.py +131 -0
- generation_config.json +5 -0
- merges.txt +0 -0
- model-00001-of-00032.safetensors +3 -0
- model-00002-of-00032.safetensors +3 -0
- model-00003-of-00032.safetensors +3 -0
- model-00004-of-00032.safetensors +3 -0
- model-00005-of-00032.safetensors +3 -0
- model-00006-of-00032.safetensors +3 -0
- model-00007-of-00032.safetensors +3 -0
- model-00008-of-00032.safetensors +3 -0
- model-00009-of-00032.safetensors +3 -0
- model-00010-of-00032.safetensors +3 -0
- model-00011-of-00032.safetensors +3 -0
- model-00012-of-00032.safetensors +3 -0
- model-00013-of-00032.safetensors +3 -0
- model-00014-of-00032.safetensors +3 -0
- model-00015-of-00032.safetensors +3 -0
- model-00016-of-00032.safetensors +3 -0
- model-00017-of-00032.safetensors +3 -0
- model-00018-of-00032.safetensors +3 -0
- model-00019-of-00032.safetensors +3 -0
- model-00020-of-00032.safetensors +3 -0
- model-00021-of-00032.safetensors +3 -0
- model-00022-of-00032.safetensors +3 -0
- model-00023-of-00032.safetensors +3 -0
- model-00024-of-00032.safetensors +3 -0
- model-00025-of-00032.safetensors +3 -0
- model-00026-of-00032.safetensors +3 -0
- model-00027-of-00032.safetensors +3 -0
- model-00028-of-00032.safetensors +3 -0
- model-00029-of-00032.safetensors +3 -0
- model-00030-of-00032.safetensors +3 -0
- model-00031-of-00032.safetensors +3 -0
- model-00032-of-00032.safetensors +3 -0
- model.safetensors.index.json +3 -0
- modeling_minimax_m2.py +765 -0
- quant_log.csv +0 -0
- quantize_config.json +25 -0
- special_tokens_map.json +76 -0
- test_minimax_m2_hf.py +178 -0
- tokenizer.json +3 -0
- tokenizer_config.json +497 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
__init__.py
ADDED
@@ -0,0 +1,26 @@
+# SPDX-FileCopyrightText: 2024-2025 ModelCloud.ai
+# SPDX-FileCopyrightText: 2024-2025 qubitium@modelcloud.ai
+# SPDX-License-Identifier: Apache-2.0
+# Contact: qubitium@modelcloud.ai, x.com/qubitium
+#
+# """MiniMax M2 Hugging Face remote code support."""
+
+from .configuration_minimax_m2 import MiniMaxM2Config
+from .modeling_minimax_m2 import (
+    MiniMaxForCausalLM,
+    MiniMaxM2ForCausalLM,
+    MiniMaxM2Model,
+    MiniMaxM2PreTrainedModel,
+    MiniMaxModel,
+    MiniMaxPreTrainedModel,
+)
+
+__all__ = [
+    "MiniMaxM2Config",
+    "MiniMaxM2PreTrainedModel",
+    "MiniMaxM2Model",
+    "MiniMaxM2ForCausalLM",
+    "MiniMaxPreTrainedModel",
+    "MiniMaxModel",
+    "MiniMaxForCausalLM",
+]
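Because config.json (below) maps AutoConfig and AutoModelForCausalLM onto these remote-code classes via auto_map, the checkpoint can be loaded through the standard Auto classes. A minimal sketch, assuming the commit is served from a hub repo (the repo id below is a placeholder, not confirmed by this commit) and a GPTQ-capable backend such as gptqmodel or optimum is installed:

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ModelCloud/MiniMax-M2-GPTQ-4bit"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# trust_remote_code=True lets transformers import configuration_minimax_m2.py
# and modeling_minimax_m2.py from the repo, as wired up by auto_map in config.json.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,
    device_map="auto",
)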
added_tokens.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "</minimax:tool_call>": 200053,
+  "</think>": 200051,
+  "<add_file>": 200036,
+  "<code_context>": 200043,
+  "<code_interpreter>": 200023,
+  "<commit_after>": 200018,
+  "<commit_before>": 200016,
+  "<commit_message>": 200040,
+  "<commit_msg>": 200017,
+  "<delete_file>": 200037,
+  "<edit_file>": 200039,
+  "<empty_output>": 200015,
+  "<empty_source_file>": 200041,
+  "<file_content>": 200044,
+  "<file_sep>": 200049,
+  "<filename>": 200006,
+  "<filepath>": 200048,
+  "<fim_middle>": 200002,
+  "<fim_pad>": 200004,
+  "<fim_prefix>": 200001,
+  "<fim_suffix>": 200003,
+  "<function_call>": 200022,
+  "<gh_stars>": 200007,
+  "<issue_closed>": 200010,
+  "<issue_comment>": 200009,
+  "<issue_start>": 200008,
+  "<jupyter_code>": 200013,
+  "<jupyter_error>": 200035,
+  "<jupyter_output>": 200014,
+  "<jupyter_start>": 200011,
+  "<jupyter_text>": 200012,
+  "<minimax:tool_call>": 200052,
+  "<pr_start>": 200046,
+  "<rename_file>": 200038,
+  "<repo_struct>": 200042,
+  "<reponame>": 200005,
+  "<review_comment>": 200047,
+  "<source_files>": 200045,
+  "<think>": 200050,
+  "[e~[": 200020,
+  "]!d~[": 200021,
+  "]!p~[": 200000,
+  "]<]end of image[>[": 200030,
+  "]<]end of speech[>[": 200028,
+  "]<]end of video[>[": 200032,
+  "]<]image[>[": 200025,
+  "]<]speech[>[": 200024,
+  "]<]start of image[>[": 200029,
+  "]<]start of speech[>[": 200027,
+  "]<]start of video[>[": 200031,
+  "]<]video[>[": 200026,
+  "]<]vision pad[>[": 200033,
+  "]~!b[": 200034,
+  "]~b]": 200019
+}
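These ids sit at the top of the 200,064-entry vocabulary. A quick sanity check, sketched under the assumption that the tokenizer files from this commit are loaded (placeholder repo id again):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ModelCloud/MiniMax-M2-GPTQ-4bit")  # placeholder repo id
assert tok.convert_tokens_to_ids("<think>") == 200050
assert tok.convert_tokens_to_ids("</minimax:tool_call>") == 200053
# "[e~[" (id 200020) doubles as both eos_token_id and pad_token_id per config.json.
assert tok.convert_tokens_to_ids("[e~[") == 200020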
chat_template.jinja
ADDED
@@ -0,0 +1,159 @@
+{# ----------‑‑‑ special token variables ‑‑‑---------- #}
+{%- set toolcall_begin_token = '<minimax:tool_call>' -%}
+{%- set toolcall_end_token = '</minimax:tool_call>' -%}
+{#- Tool Rendering Functions ============================================== -#}
+{%- macro render_tool_namespace(namespace_name, tool_list) -%}
+{%- for tool in tool_list -%}
+<tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
+{% endfor -%}
+{%- endmacro -%}
+{%- macro visible_text(content) -%}
+{%- if content is string -%}
+{{ content }}
+{%- elif content is iterable and content is not mapping -%}
+{%- for item in content -%}
+{%- if item is mapping and item.type == 'text' -%}
+{{- item.text }}
+{%- elif item is string -%}
+{{- item }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{- content }}
+{%- endif -%}
+{%- endmacro -%}
+{#- System Message Construction ============================================ -#}
+{%- macro build_system_message(system_message) -%}
+{%- if system_message and system_message.content -%}
+{{- visible_text(system_message.content) }}
+{%- else -%}
+{%- if model_identity is not defined -%}
+{%- set model_identity = "You are a helpful assistant." -%}
+{%- endif -%}
+{{- model_identity }}
+{%- endif -%}
+
+{#- Handle current_date -#}
+{%- if system_message and system_message.current_date -%}
+{{- '\n' ~ 'Current date: ' + system_message.current_date }}
+{%- endif -%}
+{#- Handle current_location -#}
+{%- if system_message and system_message.current_location -%}
+{{- '\n' ~ 'Current location: ' + system_message.current_location }}
+{%- endif -%}
+{%- endmacro -%}
+{#- Main Template Logic ================================================= -#}
+{#- Extract system message (only first message if it's system) -#}
+{%- set system_message = none -%}
+{%- set conversation_messages = messages -%}
+{%- if messages and messages[0].role == "system" -%}
+{%- set system_message = messages[0] -%}
+{%- set conversation_messages = messages[1:] -%}
+{%- endif -%}
+{#- Get the last user message turn, for interleved thinking -#}
+{%- set ns = namespace(last_user_index=-1) %}
+{% for m in conversation_messages %}
+{%- if m.role == 'user' %}
+{% set ns.last_user_index = loop.index0 -%}
+{%- endif %}
+{%- endfor %}
+{#- Render system message -#}
+{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+{{- build_system_message(system_message) }}
+{#- Render tools if available -#}
+{%- if tools -%}
+{{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+{{- '\n' ~ '<tools>' ~ '\n' }}
+{{- render_tool_namespace("functions", tools) }}
+{{- '</tools>' ~ '\n\n' }}
+{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+{{- '\n' ~ toolcall_begin_token }}
+<invoke name="tool-name-1">
+<parameter name="param-key-1">param-value-1</parameter>
+<parameter name="param-key-2">param-value-2</parameter>
+...
+</invoke>
+{{- '\n' ~ toolcall_end_token }}
+{%- endif -%}
+{{- '[e~[\n' }}
+
+{#- Render messages -#}
+{%- set last_tool_call = namespace(name=none) -%}
+{%- for message in conversation_messages -%}
+{%- if message.role == 'assistant' -%}
+{#- Only render reasoning_content if no user message follows -#}
+{{- ']~b]ai' ~ '\n' }}
+
+{%- set reasoning_content = '' %}
+{%- set content = visible_text(message.content) %}
+{%- if message.reasoning_content is string %}
+{%- set reasoning_content = message.reasoning_content %}
+{%- else %}
+{%- if '</think>' in content %}
+{%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
+{%- set content = content.split('</think>')[-1].strip('\n') %}
+{%- endif %}
+{%- endif %}
+{%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+{{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
+{%- endif -%}
+{%- if content -%}
+{{- content }}
+{%- endif -%}
+{%- if message.tool_calls -%}
+{{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+{%- for tool_call in message.tool_calls -%}
+{%- if tool_call.function %}
+{%- set tool_call = tool_call.function %}
+{%- endif %}
+{{- '<invoke name="' + tool_call.name + '">' }}
+{% set _args = tool_call.arguments %}
+{%- for k, v in _args.items() %}
+{{- '<parameter name="' + k + '">' }}
+{{- v | tojson(ensure_ascii=False) if v is not string else v }}
+{{- '</parameter>' }}
+{% endfor %}
+{{- '</invoke>' ~ '\n' }}
+{%- endfor -%}
+
+{{- toolcall_end_token}}
+{%- set last_tool_call.name = message.tool_calls[-1].name -%}
+{%- else -%}
+{%- set last_tool_call.name = none -%}
+{%- endif -%}
+{{- '[e~[' ~ '\n' }}
+
+{%- elif message.role == 'tool' -%}
+{%- if last_tool_call.name is none -%}
+{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+{%- endif -%}
+{%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+{{- ']~b]tool' }}
+{%- endif -%}
+{%- if message.content is string -%}
+{{- '\n<response>' }}
+{{- message.content }}
+{{- '</response>' }}
+{%- else -%}
+{%- for tr in message.content -%}
+{{- '\n<response>' }}
+{{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+{{- '\n</response>' }}
+{%- endfor -%}
+{%- endif -%}
+{%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+{{- '[e~[\n' -}}
+{%- endif -%}

+{%- elif message.role == 'user' -%}
+{{- ']~b]user' ~ '\n' }}
+{{- visible_text(message.content) }}
+{{- '[e~[' ~ '\n' }}
+{%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt -#}
+{%- if add_generation_prompt -%}
+{{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
+{%- endif -%}
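The template consumes messages (plus optional tools and add_generation_prompt), frames each turn as ']~b]role' ... '[e~[', and ends with an open <think> block when a generation prompt is requested. A sketch of rendering it through the tokenizer (placeholder repo id):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ModelCloud/MiniMax-M2-GPTQ-4bit")  # placeholder repo id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# The rendered prompt ends with ']~b]ai\n<think>\n' (template lines 157-158),
# so the model starts by emitting reasoning inside a <think> ... </think> block.
print(prompt)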
config.json
ADDED
@@ -0,0 +1,147 @@
+{
+  "architectures": [
+    "MiniMaxM2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "attn_type_list": [
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1
+  ],
+  "attn_window_size": null,
+  "auto_map": {
+    "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+    "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+  },
+  "dtype": "bfloat16",
+  "eos_token_id": 200020,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layernorm_full_attention_beta": 1.0,
+  "layernorm_linear_attention_beta": 1.0,
+  "layernorm_mlp_beta": 1.0,
+  "max_model_len": null,
+  "max_position_embeddings": 196608,
+  "mlp_intermediate_size": 8192,
+  "model_type": "minimax",
+  "mtp_transformer_layers": 1,
+  "num_attention_heads": 48,
+  "num_expert_group": null,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 62,
+  "num_key_value_heads": 8,
+  "num_local_experts": 256,
+  "num_mtp_modules": 3,
+  "output_router_logits": false,
+  "pad_token_id": 200020,
+  "partial_rotary_factor": 0.5,
+  "qk_norm_type": "per_layer",
+  "quantization_config": {
+    "bits": 4,
+    "checkpoint_format": "gptq",
+    "desc_act": false,
+    "group_size": 32,
+    "lm_head": false,
+    "meta": {
+      "act_group_aware": true,
+      "damp_auto_increment": 0.01,
+      "damp_percent": 0.05,
+      "mse": 0.0,
+      "quantizer": [
+        "gptqmodel:5.1.0-dev"
+      ],
+      "static_groups": false,
+      "true_sequential": true,
+      "uri": "https://github.com/modelcloud/gptqmodel",
+      "v2": false,
+      "v2_alpha": 0.25
+    },
+    "pack_dtype": "int32",
+    "pack_impl": "cpu",
+    "quant_method": "gptq",
+    "sym": true
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 5000000,
+  "rotary_dim": 64,
+  "routed_scaling_factor": 1.0,
+  "router_aux_loss_coef": 0.001,
+  "router_jitter_noise": 0.0,
+  "scoring_func": "sigmoid",
+  "shared_intermediate_size": 0,
+  "shared_moe_mode": "sigmoid",
+  "sliding_window": null,
+  "swa_rope_theta": -1.0,
+  "tie_word_embeddings": false,
+  "topk_group": null,
+  "transformers_version": "4.57.1",
+  "use_cache": true,
+  "use_grouped_topk": true,
+  "use_mtp": true,
+  "use_qk_norm": true,
+  "use_routing_bias": true,
+  "vocab_size": 200064
+}
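The config describes a 62-layer MoE with 256 routed experts per layer, 8 active per token, each expert a bias-free SwiGLU MLP of width 1536 (the w1/w2/w3 layout in modeling_minimax_m2.py below). A rough back-of-the-envelope count of just the routed-expert weights, excluding attention, router, MTP, and embedding parameters:

hidden_size = 3072
intermediate_size = 1536   # per-expert FFN width ("intermediate_size")
num_local_experts = 256
num_experts_per_tok = 8
num_hidden_layers = 62

# Each expert holds three weight matrices: w1 and w3 (hidden -> intermediate)
# plus w2 (intermediate -> hidden), i.e. 3 * hidden * intermediate parameters.
params_per_expert = 3 * hidden_size * intermediate_size
total_expert = params_per_expert * num_local_experts * num_hidden_layers
active_expert = params_per_expert * num_experts_per_tok * num_hidden_layers
print(f"{total_expert / 1e9:.0f}B routed-expert params, {active_expert / 1e9:.1f}B active per token")
# -> roughly 225B routed-expert params, about 7B of which are active per token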
configuration_minimax_m2.py
ADDED
@@ -0,0 +1,131 @@
+# SPDX-FileCopyrightText: 2024-2025 ModelCloud.ai
+# SPDX-FileCopyrightText: 2024-2025 qubitium@modelcloud.ai
+# SPDX-License-Identifier: Apache-2.0
+# Contact: qubitium@modelcloud.ai, x.com/qubitium
+
+"""Configuration for the MiniMax M2 architecture."""
+
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class MiniMaxM2Config(PretrainedConfig):
+    model_type = "minimax"
+
+    def __init__(
+        self,
+        vocab_size: int = 200_064,
+        hidden_size: int = 3_072,
+        intermediate_size: int = 1_536,
+        mlp_intermediate_size: int = 8_192,
+        num_hidden_layers: int = 62,
+        num_attention_heads: int = 48,
+        num_key_value_heads: int = 8,
+        head_dim: Optional[int] = 128,
+        num_local_experts: int = 256,
+        num_experts_per_tok: int = 8,
+        attn_type_list: Optional[List[int]] = None,
+        attention_dropout: float = 0.0,
+        hidden_act: str = "silu",
+        rms_norm_eps: float = 1e-6,
+        max_position_embeddings: int = 196_608,
+        rope_theta: float = 5_000_000.0,
+        rotary_dim: int = 64,
+        rope_scaling: Optional[dict] = None,
+        use_qk_norm: bool = True,
+        qk_norm_type: str = "per_layer",
+        use_routing_bias: bool = True,
+        scoring_func: str = "sigmoid",
+        router_aux_loss_coef: float = 0.001,
+        router_jitter_noise: float = 0.0,
+        output_router_logits: bool = False,
+        use_grouped_topk: bool = True,
+        num_expert_group: Optional[int] = None,
+        topk_group: Optional[int] = None,
+        routed_scaling_factor: float = 1.0,
+        layernorm_full_attention_beta: float = 1.0,
+        layernorm_linear_attention_beta: float = 1.0,
+        layernorm_mlp_beta: float = 1.0,
+        shared_intermediate_size: int = 0,
+        shared_moe_mode: str = "sigmoid",
+        use_mtp: bool = True,
+        num_mtp_modules: int = 3,
+        mtp_transformer_layers: int = 1,
+        attn_window_size: Optional[Union[int, List[int]]] = None,
+        swa_rope_theta: float = -1.0,
+        sliding_window: Optional[int] = None,
+        initializer_range: float = 0.02,
+        tie_word_embeddings: bool = False,
+        max_model_len: Optional[int] = None,
+        bos_token_id: Optional[int] = None,
+        eos_token_id: Optional[int] = None,
+        pad_token_id: Optional[int] = None,
+        use_cache: bool = True,
+        **kwargs,
+    ) -> None:
+        quantization_config = kwargs.pop("quantization_config", None)
+        transformers_version = kwargs.pop("transformers_version", None)
+
+        super().__init__(
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            pad_token_id=pad_token_id,
+            **kwargs,
+        )
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.mlp_intermediate_size = mlp_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+        self.head_dim = head_dim or hidden_size // num_attention_heads
+        self.num_local_experts = num_local_experts
+        self.num_experts_per_tok = num_experts_per_tok
+        self.attn_type_list = attn_type_list or [1] * num_hidden_layers
+        self.attention_dropout = attention_dropout
+        self.hidden_act = hidden_act
+        self.rms_norm_eps = rms_norm_eps
+        self.max_position_embeddings = max_position_embeddings
+        self.rope_theta = rope_theta
+        self.rotary_dim = rotary_dim
+        self.rope_scaling = rope_scaling
+        self.use_qk_norm = use_qk_norm
+        self.qk_norm_type = qk_norm_type
+        self.use_routing_bias = use_routing_bias
+        self.scoring_func = scoring_func
+        self.router_aux_loss_coef = router_aux_loss_coef
+        self.router_jitter_noise = router_jitter_noise
+        self.output_router_logits = output_router_logits
+        self.use_grouped_topk = use_grouped_topk
+        self.num_expert_group = num_expert_group
+        self.topk_group = topk_group
+        self.routed_scaling_factor = routed_scaling_factor
+        self.layernorm_full_attention_beta = layernorm_full_attention_beta
+        self.layernorm_linear_attention_beta = layernorm_linear_attention_beta
+        self.layernorm_mlp_beta = layernorm_mlp_beta
+        self.shared_intermediate_size = shared_intermediate_size
+        self.shared_moe_mode = shared_moe_mode
+        self.use_mtp = use_mtp
+        self.num_mtp_modules = num_mtp_modules
+        self.mtp_transformer_layers = mtp_transformer_layers
+        self.attn_window_size = attn_window_size
+        self.swa_rope_theta = swa_rope_theta
+        self.sliding_window = sliding_window
+        self.initializer_range = initializer_range
+        self.max_model_len = max_model_len
+        self.use_cache = use_cache
+
+        # Convenient accessor used by rotary embedding helper
+        self.partial_rotary_factor = float(self.rotary_dim) / float(self.head_dim)
+        if quantization_config is not None:
+            self.quantization_config = quantization_config
+        self.transformers_version = transformers_version
+
+
+__all__ = ["MiniMaxM2Config"]
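The constructor defaults mirror config.json, so an argument-free instantiation reproduces the shipped settings; in particular partial_rotary_factor is derived as rotary_dim / head_dim. A quick check, assuming the repo directory is importable (e.g. on sys.path):

from configuration_minimax_m2 import MiniMaxM2Config

cfg = MiniMaxM2Config()
assert cfg.num_hidden_layers == 62
assert cfg.attn_type_list == [1] * 62         # every layer uses full attention
assert cfg.partial_rotary_factor == 64 / 128  # 0.5: RoPE applied to half of each 128-dim head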
generation_config.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "do_sample": true,
+  "top_k": 40,
+  "transformers_version": "4.57.1"
+}
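These defaults are picked up automatically by model.generate(); passing them explicitly is equivalent. A sketch reusing the model, tokenizer, and prompt from the earlier examples:

# Equivalent explicit call; generation_config.json makes these the defaults.
output_ids = model.generate(
    **tokenizer(prompt, return_tensors="pt").to(model.device),
    do_sample=True,   # sample rather than greedy-decode
    top_k=40,         # restrict sampling to the 40 most likely tokens
    max_new_tokens=256,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=False))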
merges.txt
ADDED
(The diff for this file is too large to render.)
model-00001-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e8fc118edae49d868e455650309fd73bc4a92d7228f4b51193a00cdc848f851
+size 4295175437

model-00002-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51fd7ecc9b54bd392e3c52c372e9c5aacb7a71d15c809ce74eaec292cced23d2
+size 4295719674

model-00003-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a73372b2d17430a01cc95b223b6e4d033328eb15f4c57e911ae2a15c7beb0b57
+size 4293562869

model-00004-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:baf9022bfb41cf6457d516377085d3f206f2be16a1d7481d9ee47be4d8273f0b
+size 4295719674

model-00005-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ad329edbee8be7bfe631f9f28f14af916acf67e9132fa15fea55f2736d5c47f
+size 4293562869

model-00006-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d7cec1325cd46ee7f97d44c7937d3d94ff2af6842c6e89dd1706d477ca476c
+size 4295722464

model-00007-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e51de987e929bbc1032e461d21b2d312fbf384a79b0fe52dd126d084add72ff5
+size 4293569100

model-00008-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcc70556d40117eef495d6f052eaa03e5b552a2acb105fcf2614b32c62d804d7
+size 4295725907

model-00009-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c942ad83b87d4b03e03c07ccc0a199468f23d93fda71081766b5254292f23d2
+size 4293569100

model-00010-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f15a1e0119d2603353d2aa3159bb237b7be6bb0c1f3c7f2d027756d162bce3ef
+size 4295725915

model-00011-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f35fbfea62ee1c38fe900f44b1e862cfee983297bbe419abf1ea6421087f1d5
+size 4293569138

model-00012-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69a242da922ae92bc9f64dd112c0715ed658d0af97dfa05f8b3c941df22e756e
+size 4295073051

model-00013-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fea2197cfbb3c34afc6df7fb197404a324534b5b976e9cadce798a93ff79688f
+size 4295097945

model-00014-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db57387dadca66946350af510d9e5f9348a33f134dc13e418639f38213ab57d
+size 4295725753

model-00015-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d93ae081e269ae24f51b1613d267695b7105510b2f3a516127b099dea4b4ba
+size 4293568978

model-00016-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e7f60f293929bb5cdd7c7a901955a7c10285fdc5b0d9dd638529951c2e07c47
+size 4295725808

model-00017-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c30d9015c59677a3877269e8d9b04231209fbfae4df1482e3b8e8b9098c1b12
+size 4293569001

model-00018-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66c5031f24a38cb17eeb8ec95cdec6cd95f9d0373e3436689f572c719e973c32
+size 4295725808

model-00019-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b1f103717c3c6fe5f84837d1235dbd27e959c86897ea44f9c7cecf3c0f475e0
+size 4293569001

model-00020-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78c59c1a3fff90c81833659752b7f55c4835c065f13741dc31b364fb2816bf4b
+size 4295725808

model-00021-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cd302fbba3fb85e28be1ac1089e5621a3c4348e2cf6f8aafceec6b8481813dd
+size 4293569001

model-00022-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f27dfc5d0a203870808f14a666a9deea1b85fe64a5e7077be527cc3f07fdaccb
+size 4295725808

model-00023-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7313344f812473958a0b4f093c1b035b8402844000f08170a1ea2edc697b0ba0
+size 4293569001

model-00024-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f34f164828af80909ede3b800dc8b400e6d2ada0fac788c46f80375fcee05a05
+size 4295725808

model-00025-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf9d077821bff104bc7ce4fb0ae2a07f156692b758dd4192f00d63a34c3a9913
+size 4293569001

model-00026-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a434beca2ab48bf034a9d61cb1ff988e97f1884ec9eb35dcd88e21748aa423a6
+size 4295725808

model-00027-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0cc0600be339c3e27bea91c3d4a643f01995f5892f2231fa3901f5e2fd5f719
+size 4293569001

model-00028-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:612ede8611d412b536008f4c70f726baf9053f3787f054d1205f28480e4e0b03
+size 4295725808

model-00029-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79fc5c5c33b4a7431ec405f517407f078df5165caac256f75a3bb9a89f1d7509
+size 4293569001

model-00030-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ff320c87e941702b1b5994a4e58b91ad441b3bafb41f29591d982dcc6dc7f7
+size 4295725808

model-00031-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88210e12bd9d871625f0992ad945369a12cec0e3a605c67d33eb45ffd653fe64
+size 4293569001

model-00032-of-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d276ef8b533f3c7d942492869dc6f6ca21c9259ec21978ea155e47c17b284f2a
+size 1410120148

model.safetensors.index.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cdd2c0bb87cd37009ee6c8e49f2eb4ee9e489b40b12709815e6e0d2a90da452
+size 18673475
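Each Git LFS pointer records only the blob's SHA-256 and byte size, so the download footprint can be read off without fetching the weights: 31 shards of roughly 4.29 GB plus a 1.41 GB tail, about 134.5 GB in total. A sketch that sums the pointer files in a pointer-only (non-LFS) checkout:

from pathlib import Path

# Sum the "size" lines of the git-lfs pointer files above; run from the
# repo root of a clone made with GIT_LFS_SKIP_SMUDGE=1 (pointers, not blobs).
total = 0
for ptr in sorted(Path(".").glob("model-*-of-00032.safetensors")):
    for line in ptr.read_text().splitlines():
        if line.startswith("size "):
            total += int(line.split()[1])
print(f"{total / 1e9:.1f} GB across 32 shards")  # about 134.5 GB for this commit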
modeling_minimax_m2.py
ADDED
@@ -0,0 +1,765 @@
+# SPDX-FileCopyrightText: 2024-2025 ModelCloud.ai
+# SPDX-FileCopyrightText: 2024-2025 qubitium@modelcloud.ai
+# SPDX-License-Identifier: Apache-2.0
+# Contact: qubitium@modelcloud.ai, x.com/qubitium
+
+"""PyTorch implementation of the MiniMax M2 architecture for Hugging Face Transformers."""
+
+from __future__ import annotations
+
+import copy
+import time
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.generation import GenerationMixin
+from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
+from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
+
+from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding, repeat_kv, rotate_half
+
+from .configuration_minimax_m2 import MiniMaxM2Config
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "MiniMaxM2Config"
+_CHECKPOINT_FOR_DOC = "MiniMaxAI/MiniMax-M2"
+
+
+def load_balancing_loss_func(
+    gate_logits: Union[torch.Tensor, Tuple[torch.Tensor, ...]],
+    num_experts: int,
+    top_k: int,
+    attention_mask: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
+    if gate_logits is None:
+        return torch.tensor(0.0)
+    if isinstance(gate_logits, torch.Tensor):
+        logits = gate_logits
+    else:
+        logits = torch.cat([layer_gate.to(gate_logits[0].device) for layer_gate in gate_logits], dim=0)
+
+    routing_weights = torch.softmax(logits, dim=-1, dtype=torch.float32)
+    _, selected = torch.topk(routing_weights, top_k, dim=-1)
+    expert_mask = torch.nn.functional.one_hot(selected, num_experts)
+
+    if attention_mask is None:
+        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+        router_prob_per_expert = torch.mean(routing_weights, dim=0)
+    else:
+        batch_size, seq_len = attention_mask.shape
+        num_layers = logits.shape[0] // (batch_size * seq_len)
+
+        expanded_mask = (
+            attention_mask[None, :, :, None, None]
+            .expand(num_layers, batch_size, seq_len, top_k, num_experts)
+            .reshape(-1, top_k, num_experts)
+            .to(logits.device)
+        )
+        tokens_per_expert = torch.sum(expert_mask.float() * expanded_mask, dim=0) / torch.sum(expanded_mask, dim=0)
+
+        router_mask = (
+            attention_mask[None, :, :, None]
+            .expand(num_layers, batch_size, seq_len, num_experts)
+            .reshape(-1, num_experts)
+            .to(logits.device)
+        )
+        router_prob_per_expert = torch.sum(routing_weights * router_mask, dim=0) / torch.sum(router_mask, dim=0)
+
+    loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+    return loss * num_experts
+
+
+def apply_rotary_pos_emb_partial(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    cos: torch.Tensor,
+    sin: torch.Tensor,
+    rotary_dim: int,
+    unsqueeze_dim: int = 2,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    cos = cos.unsqueeze(unsqueeze_dim)[..., :rotary_dim]
+    sin = sin.unsqueeze(unsqueeze_dim)[..., :rotary_dim]
+    q_rot = q[..., :rotary_dim]
+    k_rot = k[..., :rotary_dim]
+
+    q_rot = (q_rot * cos) + (rotate_half(q_rot) * sin)
+    k_rot = (k_rot * cos) + (rotate_half(k_rot) * sin)
+
+    q = torch.cat((q_rot, q[..., rotary_dim:]), dim=-1)
+    k = torch.cat((k_rot, k[..., rotary_dim:]), dim=-1)
+    return q, k
+
+
+class MiniMaxM2RMSNorm(nn.Module):
+    def __init__(self, hidden_size: int, eps: float = 1e-6) -> None:
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.variance_epsilon = eps
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        input_dtype = hidden_states.dtype
+        hidden_states = hidden_states.to(torch.float32)
+        variance = hidden_states.pow(2).mean(-1, keepdim=True)
+        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+        return (self.weight * hidden_states).to(input_dtype)
+
+
+class MiniMaxM2MLP(nn.Module):
+    def __init__(self, config: MiniMaxM2Config) -> None:
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+
+        self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.act_fn = ACT2FN[config.hidden_act]
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        gate = self.act_fn(self.w1(hidden_states))
+        up = self.w3(hidden_states)
+        hidden_states = gate * up
+        hidden_states = self.w2(hidden_states)
+        return hidden_states
+
+
+class MiniMaxM2SparseMoeBlock(nn.Module):
+    def __init__(self, config: MiniMaxM2Config) -> None:
+        super().__init__()
+        self.hidden_dim = config.hidden_size
+        self.experts = nn.ModuleList([MiniMaxM2MLP(config) for _ in range(config.num_local_experts)])
+        self.num_experts = config.num_local_experts
+        self.top_k = config.num_experts_per_tok
+        self.jitter_noise = config.router_jitter_noise
+        self.use_routing_bias = config.use_routing_bias
+        self.scoring_func = getattr(config, "scoring_func", "softmax")
+        self.use_grouped_topk = getattr(config, "use_grouped_topk", False)
+        self.num_expert_group = getattr(config, "num_expert_group", None)
+        self.topk_group = getattr(config, "topk_group", None)
+        self.routed_scaling_factor = getattr(config, "routed_scaling_factor", 1.0)
+
+        if self.use_grouped_topk:
+            if self.num_expert_group is None or self.num_expert_group <= 0:
+                self.num_expert_group = 1
+            if self.topk_group is None or self.topk_group <= 0:
+                self.topk_group = min(self.num_expert_group, self.top_k)
+        else:
+            self.num_expert_group = 1
+            self.topk_group = 1
+
+        self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
+        if self.use_routing_bias:
+            self.e_score_correction_bias = nn.Parameter(torch.zeros(self.num_experts, dtype=torch.float32))
+        else:
+            self.register_parameter("e_score_correction_bias", None)
+
+    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        batch_size, seq_len, hidden_dim = hidden_states.shape
+        if self.training and self.jitter_noise > 0:
+            noise = torch.empty_like(hidden_states).uniform_(
+                1.0 - self.jitter_noise,
+                1.0 + self.jitter_noise,
+            )
+            hidden_states = hidden_states * noise
+
+        hidden_states = hidden_states.view(-1, hidden_dim)
+        gate_dtype = self.gate.weight.dtype
+        router_logits = self.gate(hidden_states.to(gate_dtype)).to(torch.float32)
+        if self.e_score_correction_bias is not None:
+            # Bias is applied after scoring (see vLLM/SGLang implementations).
+            correction_bias = self.e_score_correction_bias.to(router_logits.device, router_logits.dtype)
+        else:
+            correction_bias = None
+
+        if self.scoring_func == "sigmoid":
+            scores = torch.sigmoid(router_logits)
+        elif self.scoring_func == "softmax":
+            scores = torch.softmax(router_logits, dim=-1)
+        else:
+            raise ValueError(f"Unsupported scoring function: {self.scoring_func}")
+
+        if correction_bias is not None:
+            original_scores = scores
+            scores = scores + correction_bias
+        else:
+            original_scores = scores
+        topk_scores: torch.Tensor
+        if self.use_grouped_topk and self.num_expert_group > 1:
+            experts_per_group = scores.size(-1) // self.num_expert_group
+            scores_grouped = scores.view(scores.size(0), self.num_expert_group, experts_per_group)
+            if correction_bias is not None:
+                topk_in_group = min(2, experts_per_group)
+                if topk_in_group > 0:
+                    group_scores = scores_grouped.topk(topk_in_group, dim=-1)[0].sum(dim=-1)
+                else:
+                    group_scores = torch.zeros_like(scores_grouped[..., 0])
+            else:
+                group_scores = scores_grouped.max(dim=-1).values
+            group_mask = torch.zeros_like(group_scores)
+            selected_groups = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=True).indices
+            group_mask.scatter_(1, selected_groups, 1.0)
+            mask = group_mask.unsqueeze(-1).expand(-1, -1, experts_per_group).reshape(scores.size())
+            masked_scores = scores.masked_fill(mask == 0, float("-inf"))
+            topk_scores, selected_experts = torch.topk(masked_scores, self.top_k, dim=-1, sorted=True)
+        else:
+            topk_scores, selected_experts = torch.topk(scores, self.top_k, dim=-1, sorted=True)
+
+        if correction_bias is not None:
+            routing_weights = original_scores.gather(1, selected_experts)
+        else:
+            routing_weights = topk_scores
+
+        routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True).clamp(min=1e-12)
+        if self.routed_scaling_factor != 1.0:
+            routing_weights = routing_weights * self.routed_scaling_factor
+        routing_weights = routing_weights.to(hidden_states.dtype)
+        selected_experts = selected_experts.to(torch.long)
+
+        final_hidden_states = torch.zeros_like(hidden_states)
+        expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
+        expert_hit = torch.nonzero(expert_mask.sum(dim=(-1, -2)) > 0, as_tuple=False).flatten()
+
+        for expert_idx in expert_hit.tolist():
+            expert_layer = self.experts[expert_idx]
+            idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
+            token_states = hidden_states.index_select(0, top_x)
+            expert_output = expert_layer(token_states) * routing_weights[top_x, idx].unsqueeze(-1)
+            final_hidden_states.index_add_(0, top_x, expert_output.to(final_hidden_states.dtype))
+
+        final_hidden_states = final_hidden_states.view(batch_size, seq_len, hidden_dim)
+        return final_hidden_states, router_logits
+
+
+class MiniMaxM2Attention(nn.Module):
+    def __init__(self, config: MiniMaxM2Config, layer_idx: int) -> None:
+        super().__init__()
+        self.config = config
+        self.layer_idx = layer_idx
+
+        self.head_dim = config.head_dim
+        self.num_heads = config.num_attention_heads
+        self.num_key_value_heads = config.num_key_value_heads
+        self.num_key_value_groups = self.num_heads // max(1, self.num_key_value_heads)
+        self.rotary_dim = config.rotary_dim
+        self.scaling = self.head_dim**-0.5
+        self.attention_dropout = config.attention_dropout
+        self.is_causal = True
+
+        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
+        max_model_len = getattr(config, "max_model_len", None)
+        if max_model_len is not None:
+            max_position_embeddings = max(max_position_embeddings, max_model_len)
+
+        attn_window_size = getattr(config, "attn_window_size", None)
+        if isinstance(attn_window_size, list):
+            sliding_window = attn_window_size[layer_idx]
+        else:
+            sliding_window = attn_window_size
+        if sliding_window is not None and sliding_window <= 0:
+            sliding_window = None
+        self.sliding_window = sliding_window
+
+        swa_rope_theta = getattr(config, "swa_rope_theta", -1.0)
+        rope_theta = config.rope_theta
+        if self.sliding_window is not None and swa_rope_theta > 0:
+            rope_theta = swa_rope_theta
+
+        self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=False)
+        self.k_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+        self.v_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+        self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=False)
+
+        self.use_qk_norm = config.use_qk_norm
+        if self.use_qk_norm:
+            self.q_norm = MiniMaxM2RMSNorm(self.num_heads * self.head_dim, eps=config.rms_norm_eps)
+            self.k_norm = MiniMaxM2RMSNorm(self.num_key_value_heads * self.head_dim, eps=config.rms_norm_eps)
+
+        rope_config = copy.deepcopy(config)
+        rope_config.hidden_size = config.hidden_size
+        rope_config.num_attention_heads = config.num_attention_heads
+        rope_config.partial_rotary_factor = float(config.rotary_dim) / float(self.head_dim)
+        rope_config.rope_theta = rope_theta
+        rope_config.max_position_embeddings = max_position_embeddings
+        self.rotary_emb = LlamaRotaryEmbedding(rope_config)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        use_cache: Optional[bool] = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+        output_attentions: bool = False,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+        bsz, q_len, _ = hidden_states.size()
+
+        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+        if self.use_qk_norm:
+            q_flat = query_states.transpose(1, 2).reshape(bsz * q_len, -1)
+            k_flat = key_states.transpose(1, 2).reshape(bsz * q_len, -1)
+            q_flat = self.q_norm(q_flat)
+            k_flat = self.k_norm(k_flat)
+            query_states = q_flat.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+            key_states = k_flat.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+        if position_embeddings is None:
+            cos, sin = self.rotary_emb(value_states, position_ids)
+        else:
+            cos, sin = position_embeddings
+
+        query_states, key_states = apply_rotary_pos_emb_partial(
+            query_states.transpose(1, 2), key_states.transpose(1, 2), cos, sin, self.rotary_dim
+        )
+        query_states = query_states.transpose(1, 2)
+        key_states = key_states.transpose(1, 2)
+
+        if past_key_values is not None:
+            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+        key_states = repeat_kv(key_states, self.num_key_value_groups)
+        value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+        attn_weights = torch.matmul(query_states, key_states.transpose(-2, -1)) * self.scaling
+        if attention_mask is not None:
+            attn_weights = attn_weights + attention_mask
+
+        if self.sliding_window is not None and past_key_values is None:
+            query_positions = torch.arange(q_len, device=hidden_states.device).view(1, 1, q_len, 1)
+            key_positions = torch.arange(key_states.shape[-2], device=hidden_states.device).view(1, 1, 1, -1)
+            window_mask = key_positions < (query_positions - self.sliding_window)
+            if window_mask.any():
+                attn_weights = attn_weights.masked_fill(window_mask, float("-inf"))
+
+        attn_weights = torch.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+        if self.training and self.attention_dropout > 0:
+            attn_weights = F.dropout(attn_weights, p=self.attention_dropout)
+
+        attn_output = torch.matmul(attn_weights, value_states)
+        attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+        return attn_output, attn_weights
+
+
+class MiniMaxM2LogitsProcessor(nn.Module):
+    def __init__(self, config: MiniMaxM2Config) -> None:
+        super().__init__()
+        self.scale = getattr(config, "logits_scale", 1.0)
+
+    def forward(self, lm_head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor:
+        logits = lm_head(hidden_states)
+        if self.scale != 1.0:
+            logits = logits * self.scale
+        return logits
+
+
+class MiniMaxM2DecoderLayer(nn.Module):
+    def __init__(self, config: MiniMaxM2Config, layer_idx: int) -> None:
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        self.self_attn = MiniMaxM2Attention(config, layer_idx)
+        self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
+        self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        use_cache: Optional[bool] = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+        output_attentions: bool = False,
+        residual: Optional[torch.Tensor] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], torch.Tensor]:
+        residual_input = hidden_states if residual is None else residual
+        hidden_states = self.input_layernorm(hidden_states)
+
+        attn_output, attn_weights = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            cache_position=cache_position,
+            position_embeddings=position_embeddings,
+            output_attentions=output_attentions,
+        )
+        hidden_states = residual_input + attn_output
+
+        residual_post_attn = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        moe_output, router_logits = self.block_sparse_moe(hidden_states)
+        hidden_states = residual_post_attn + moe_output
+
+        return hidden_states, hidden_states, router_logits, attn_weights
+
+
+class MiniMaxM2PreTrainedModel(PreTrainedModel):
+    config_class = MiniMaxM2Config
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["MiniMaxM2DecoderLayer"]
+    _supports_flash_attn = False
+    _supports_sdpa = False
+    _supports_attention_backend = False
+
+    def _init_weights(self, module: nn.Module) -> None:
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
|
| 431 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 432 |
+
if module.padding_idx is not None:
|
| 433 |
+
module.weight.data[module.padding_idx].zero_()
|
| 434 |
+
|
| 435 |
+
def _remap_qkv_weights(self, state_dict):
|
| 436 |
+
num_q = self.config.num_attention_heads * self.config.head_dim
|
| 437 |
+
num_kv = self.config.num_key_value_heads * self.config.head_dim
|
| 438 |
+
|
| 439 |
+
for layer_idx in range(self.config.num_hidden_layers):
|
| 440 |
+
prefix = f"model.layers.{layer_idx}.self_attn"
|
| 441 |
+
weight_key = f"{prefix}.qkv_proj.weight"
|
| 442 |
+
if weight_key in state_dict:
|
| 443 |
+
qkv_weight = state_dict.pop(weight_key)
|
| 444 |
+
q_weight, k_weight, v_weight = qkv_weight.split([num_q, num_kv, num_kv], dim=0)
|
| 445 |
+
state_dict.setdefault(f"{prefix}.q_proj.weight", q_weight)
|
| 446 |
+
state_dict.setdefault(f"{prefix}.k_proj.weight", k_weight)
|
| 447 |
+
state_dict.setdefault(f"{prefix}.v_proj.weight", v_weight)
|
| 448 |
+
|
| 449 |
+
def load_state_dict(self, state_dict, strict: bool = True):
|
| 450 |
+
if not isinstance(state_dict, dict):
|
| 451 |
+
raise TypeError(f"Expected state_dict to be dict, got {type(state_dict)}")
|
| 452 |
+
|
| 453 |
+
filtered_state_dict = {}
|
| 454 |
+
drop_suffixes = ("weight_scale_inv", "weight_scale", "input_scale", "scales", "amax")
|
| 455 |
+
for key, value in state_dict.items():
|
| 456 |
+
if key.endswith(drop_suffixes) or "fp8" in key:
|
| 457 |
+
continue
|
| 458 |
+
filtered_state_dict[key] = value
|
| 459 |
+
|
| 460 |
+
self._remap_qkv_weights(filtered_state_dict)
|
| 461 |
+
|
| 462 |
+
if logger.isEnabledFor(logging.INFO):
|
| 463 |
+
logger.info(
|
| 464 |
+
"MiniMaxM2: loading %d tensors (filtered from %d original).",
|
| 465 |
+
len(filtered_state_dict),
|
| 466 |
+
len(state_dict),
|
| 467 |
+
)
|
| 468 |
+
|
| 469 |
+
load_start = time.perf_counter()
|
| 470 |
+
result = super().load_state_dict(filtered_state_dict, strict=strict)
|
| 471 |
+
load_elapsed = time.perf_counter() - load_start
|
| 472 |
+
if logger.isEnabledFor(logging.INFO):
|
| 473 |
+
logger.info("MiniMaxM2: state_dict load finished in %.2f seconds.", load_elapsed)
|
| 474 |
+
|
| 475 |
+
return result
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
|
| 479 |
+
def __init__(self, config: MiniMaxM2Config) -> None:
|
| 480 |
+
super().__init__(config)
|
| 481 |
+
self.padding_idx = config.pad_token_id
|
| 482 |
+
self.vocab_size = config.vocab_size
|
| 483 |
+
|
| 484 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 485 |
+
self.layers = nn.ModuleList(
|
| 486 |
+
[MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| 487 |
+
)
|
| 488 |
+
self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 489 |
+
self.gradient_checkpointing = False
|
| 490 |
+
|
| 491 |
+
self.post_init()
|
| 492 |
+
|
| 493 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 494 |
+
return self.embed_tokens
|
| 495 |
+
|
| 496 |
+
def set_input_embeddings(self, value: nn.Module) -> None:
|
| 497 |
+
self.embed_tokens = value
|
| 498 |
+
|
| 499 |
+
def forward(
|
| 500 |
+
self,
|
| 501 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 502 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 503 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 504 |
+
past_key_values: Optional[Cache] = None,
|
| 505 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 506 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 507 |
+
use_cache: Optional[bool] = None,
|
| 508 |
+
output_attentions: bool = False,
|
| 509 |
+
output_hidden_states: bool = False,
|
| 510 |
+
output_router_logits: Optional[bool] = None,
|
| 511 |
+
return_dict: Optional[bool] = None,
|
| 512 |
+
) -> Union[MoeModelOutputWithPast, Tuple]:
|
| 513 |
+
if (input_ids is None) == (inputs_embeds is None):
|
| 514 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds.")
|
| 515 |
+
|
| 516 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 517 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 518 |
+
output_router_logits = (
|
| 519 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 520 |
+
)
|
| 521 |
+
|
| 522 |
+
if inputs_embeds is None:
|
| 523 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 524 |
+
|
| 525 |
+
if use_cache and past_key_values is None:
|
| 526 |
+
past_key_values = DynamicCache(config=self.config)
|
| 527 |
+
|
| 528 |
+
if cache_position is None:
|
| 529 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 530 |
+
cache_position = torch.arange(
|
| 531 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
if position_ids is None:
|
| 535 |
+
position_ids = cache_position.unsqueeze(0)
|
| 536 |
+
|
| 537 |
+
if self.config.sliding_window is not None:
|
| 538 |
+
causal_mask = create_sliding_window_causal_mask(
|
| 539 |
+
config=self.config,
|
| 540 |
+
input_embeds=inputs_embeds,
|
| 541 |
+
attention_mask=attention_mask,
|
| 542 |
+
cache_position=cache_position,
|
| 543 |
+
past_key_values=past_key_values,
|
| 544 |
+
position_ids=position_ids,
|
| 545 |
+
)
|
| 546 |
+
else:
|
| 547 |
+
causal_mask = create_causal_mask(
|
| 548 |
+
config=self.config,
|
| 549 |
+
input_embeds=inputs_embeds,
|
| 550 |
+
attention_mask=attention_mask,
|
| 551 |
+
cache_position=cache_position,
|
| 552 |
+
past_key_values=past_key_values,
|
| 553 |
+
position_ids=position_ids,
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
hidden_states = inputs_embeds
|
| 557 |
+
|
| 558 |
+
all_hidden_states = () if output_hidden_states else None
|
| 559 |
+
all_attentions = () if output_attentions else None
|
| 560 |
+
all_router_logits = () if output_router_logits else None
|
| 561 |
+
|
| 562 |
+
residual = None
|
| 563 |
+
for decoder_layer in self.layers:
|
| 564 |
+
if output_hidden_states:
|
| 565 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 566 |
+
|
| 567 |
+
layer_outputs = decoder_layer(
|
| 568 |
+
hidden_states,
|
| 569 |
+
attention_mask=causal_mask,
|
| 570 |
+
position_ids=position_ids,
|
| 571 |
+
past_key_values=past_key_values,
|
| 572 |
+
use_cache=use_cache,
|
| 573 |
+
cache_position=cache_position,
|
| 574 |
+
position_embeddings=None,
|
| 575 |
+
output_attentions=output_attentions,
|
| 576 |
+
residual=residual,
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
hidden_states, residual, router_logits, attn_weights = layer_outputs
|
| 580 |
+
|
| 581 |
+
if output_router_logits:
|
| 582 |
+
all_router_logits = all_router_logits + (router_logits,)
|
| 583 |
+
if output_attentions:
|
| 584 |
+
all_attentions = all_attentions + (attn_weights,)
|
| 585 |
+
|
| 586 |
+
hidden_states = self.norm(hidden_states)
|
| 587 |
+
|
| 588 |
+
if output_hidden_states:
|
| 589 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 590 |
+
|
| 591 |
+
if not return_dict:
|
| 592 |
+
outputs = (hidden_states, past_key_values)
|
| 593 |
+
if output_hidden_states:
|
| 594 |
+
outputs += (all_hidden_states,)
|
| 595 |
+
if output_attentions:
|
| 596 |
+
outputs += (all_attentions,)
|
| 597 |
+
if output_router_logits:
|
| 598 |
+
outputs += (all_router_logits,)
|
| 599 |
+
return outputs
|
| 600 |
+
|
| 601 |
+
return MoeModelOutputWithPast(
|
| 602 |
+
last_hidden_state=hidden_states,
|
| 603 |
+
past_key_values=past_key_values,
|
| 604 |
+
hidden_states=all_hidden_states,
|
| 605 |
+
attentions=all_attentions,
|
| 606 |
+
router_logits=all_router_logits,
|
| 607 |
+
)
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
|
| 611 |
+
def __init__(self, config: MiniMaxM2Config) -> None:
|
| 612 |
+
super().__init__(config)
|
| 613 |
+
self.model = MiniMaxM2Model(config)
|
| 614 |
+
self.vocab_size = config.vocab_size
|
| 615 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 616 |
+
self.router_aux_loss_coef = config.router_aux_loss_coef
|
| 617 |
+
self.num_experts = config.num_local_experts
|
| 618 |
+
self.num_experts_per_tok = config.num_experts_per_tok
|
| 619 |
+
self.logits_processor = MiniMaxM2LogitsProcessor(config)
|
| 620 |
+
|
| 621 |
+
self.post_init()
|
| 622 |
+
|
| 623 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 624 |
+
return self.model.embed_tokens
|
| 625 |
+
|
| 626 |
+
def set_input_embeddings(self, value: nn.Module) -> None:
|
| 627 |
+
self.model.embed_tokens = value
|
| 628 |
+
|
| 629 |
+
def get_output_embeddings(self) -> nn.Module:
|
| 630 |
+
return self.lm_head
|
| 631 |
+
|
| 632 |
+
def set_output_embeddings(self, new_embeddings: nn.Module) -> None:
|
| 633 |
+
self.lm_head = new_embeddings
|
| 634 |
+
|
| 635 |
+
def prepare_inputs_for_generation(
|
| 636 |
+
self,
|
| 637 |
+
input_ids: torch.LongTensor,
|
| 638 |
+
past_key_values: Optional[Cache] = None,
|
| 639 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 640 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 641 |
+
**kwargs,
|
| 642 |
+
):
|
| 643 |
+
if past_key_values is not None:
|
| 644 |
+
input_ids = input_ids[:, -1:]
|
| 645 |
+
if attention_mask is not None:
|
| 646 |
+
attention_mask = attention_mask[:, -past_key_values.get_seq_length() - 1 :]
|
| 647 |
+
|
| 648 |
+
return {
|
| 649 |
+
"input_ids": input_ids,
|
| 650 |
+
"attention_mask": attention_mask,
|
| 651 |
+
"past_key_values": past_key_values,
|
| 652 |
+
"inputs_embeds": inputs_embeds,
|
| 653 |
+
}
|
| 654 |
+
|
| 655 |
+
def forward(
|
| 656 |
+
self,
|
| 657 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 658 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 659 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 660 |
+
past_key_values: Optional[Cache] = None,
|
| 661 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 662 |
+
labels: Optional[torch.LongTensor] = None,
|
| 663 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 664 |
+
use_cache: Optional[bool] = None,
|
| 665 |
+
output_attentions: bool = False,
|
| 666 |
+
output_hidden_states: bool = False,
|
| 667 |
+
output_router_logits: Optional[bool] = None,
|
| 668 |
+
return_dict: Optional[bool] = None,
|
| 669 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 670 |
+
) -> Union[MoeCausalLMOutputWithPast, Tuple]:
|
| 671 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 672 |
+
output_router_logits = (
|
| 673 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 674 |
+
)
|
| 675 |
+
|
| 676 |
+
model_outputs = self.model(
|
| 677 |
+
input_ids=input_ids,
|
| 678 |
+
attention_mask=attention_mask,
|
| 679 |
+
position_ids=position_ids,
|
| 680 |
+
past_key_values=past_key_values,
|
| 681 |
+
inputs_embeds=inputs_embeds,
|
| 682 |
+
cache_position=cache_position,
|
| 683 |
+
use_cache=use_cache,
|
| 684 |
+
output_attentions=output_attentions,
|
| 685 |
+
output_hidden_states=output_hidden_states,
|
| 686 |
+
output_router_logits=output_router_logits,
|
| 687 |
+
return_dict=True,
|
| 688 |
+
)
|
| 689 |
+
|
| 690 |
+
hidden_states = model_outputs.last_hidden_state
|
| 691 |
+
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) and logits_to_keep > 0 else slice(None)
|
| 692 |
+
logits = self.logits_processor(self.lm_head, hidden_states[:, slice_indices, :])
|
| 693 |
+
|
| 694 |
+
loss = None
|
| 695 |
+
if labels is not None:
|
| 696 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 697 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 698 |
+
loss_fct = nn.CrossEntropyLoss()
|
| 699 |
+
loss = loss_fct(shift_logits.view(-1, self.vocab_size), shift_labels.view(-1))
|
| 700 |
+
|
| 701 |
+
aux_loss = None
|
| 702 |
+
if output_router_logits and model_outputs.router_logits is not None:
|
| 703 |
+
aux_loss = load_balancing_loss_func(
|
| 704 |
+
model_outputs.router_logits,
|
| 705 |
+
num_experts=self.num_experts,
|
| 706 |
+
top_k=self.num_experts_per_tok,
|
| 707 |
+
attention_mask=attention_mask,
|
| 708 |
+
)
|
| 709 |
+
if loss is not None:
|
| 710 |
+
loss = loss + self.router_aux_loss_coef * aux_loss.to(loss.device)
|
| 711 |
+
|
| 712 |
+
if not return_dict:
|
| 713 |
+
output = (logits,) + (model_outputs.past_key_values,)
|
| 714 |
+
if output_hidden_states:
|
| 715 |
+
output += (model_outputs.hidden_states,)
|
| 716 |
+
if output_attentions:
|
| 717 |
+
output += (model_outputs.attentions,)
|
| 718 |
+
if output_router_logits:
|
| 719 |
+
output += (model_outputs.router_logits,)
|
| 720 |
+
return ((loss,) + output) if loss is not None else output
|
| 721 |
+
|
| 722 |
+
return MoeCausalLMOutputWithPast(
|
| 723 |
+
loss=loss,
|
| 724 |
+
aux_loss=aux_loss,
|
| 725 |
+
logits=logits,
|
| 726 |
+
past_key_values=model_outputs.past_key_values,
|
| 727 |
+
hidden_states=model_outputs.hidden_states,
|
| 728 |
+
attentions=model_outputs.attentions,
|
| 729 |
+
router_logits=model_outputs.router_logits,
|
| 730 |
+
)
|
| 731 |
+
|
| 732 |
+
# -----------------------------------------------------------------------------
|
| 733 |
+
# Backward compatibility aliases
|
| 734 |
+
# -----------------------------------------------------------------------------
|
| 735 |
+
|
| 736 |
+
MiniMaxRMSNorm = MiniMaxM2RMSNorm
|
| 737 |
+
MiniMaxSparseMoeBlock = MiniMaxM2SparseMoeBlock
|
| 738 |
+
MiniMaxAttention = MiniMaxM2Attention
|
| 739 |
+
MiniMaxDecoderLayer = MiniMaxM2DecoderLayer
|
| 740 |
+
MiniMaxMLP = MiniMaxM2MLP
|
| 741 |
+
MiniMaxPreTrainedModel = MiniMaxM2PreTrainedModel
|
| 742 |
+
MiniMaxModel = MiniMaxM2Model
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
class MiniMaxForCausalLM(MiniMaxM2ForCausalLM):
|
| 746 |
+
"""Alias for compatibility with checkpoints exporting MiniMaxForCausalLM."""
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
__all__ = [
|
| 750 |
+
"MiniMaxM2RMSNorm",
|
| 751 |
+
"MiniMaxM2SparseMoeBlock",
|
| 752 |
+
"MiniMaxM2Attention",
|
| 753 |
+
"MiniMaxM2DecoderLayer",
|
| 754 |
+
"MiniMaxM2Model",
|
| 755 |
+
"MiniMaxM2ForCausalLM",
|
| 756 |
+
"MiniMaxM2PreTrainedModel",
|
| 757 |
+
"MiniMaxRMSNorm",
|
| 758 |
+
"MiniMaxSparseMoeBlock",
|
| 759 |
+
"MiniMaxAttention",
|
| 760 |
+
"MiniMaxDecoderLayer",
|
| 761 |
+
"MiniMaxPreTrainedModel",
|
| 762 |
+
"MiniMaxModel",
|
| 763 |
+
"MiniMaxMLP",
|
| 764 |
+
"MiniMaxForCausalLM",
|
| 765 |
+
]
|
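Note: the `load_state_dict` override above splits any fused `qkv_proj.weight` tensor it finds into separate q/k/v projections via `_remap_qkv_weights` before delegating to PyTorch. Below is a minimal, self-contained sketch of that split on dummy tensors; the head counts and hidden size are hypothetical, not MiniMax-M2's real values (those live in config.json).

import torch

# Hypothetical sizes for illustration only.
num_attention_heads, num_key_value_heads, head_dim, hidden_size = 8, 2, 16, 64
num_q = num_attention_heads * head_dim   # rows belonging to q_proj
num_kv = num_key_value_heads * head_dim  # rows belonging to k_proj / v_proj each

# A fused checkpoint stores one [num_q + 2 * num_kv, hidden_size] matrix.
qkv_weight = torch.randn(num_q + 2 * num_kv, hidden_size)

# The same row-wise split that _remap_qkv_weights performs per layer.
q_weight, k_weight, v_weight = qkv_weight.split([num_q, num_kv, num_kv], dim=0)
assert q_weight.shape == (num_q, hidden_size)
assert k_weight.shape == (num_kv, hidden_size)
assert v_weight.shape == (num_kv, hidden_size)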
quant_log.csv
ADDED
The diff for this file is too large to render. See raw diff.
quantize_config.json
ADDED
@@ -0,0 +1,25 @@
{
  "bits": 4,
  "group_size": 32,
  "desc_act": false,
  "sym": true,
  "lm_head": false,
  "quant_method": "gptq",
  "checkpoint_format": "gptq",
  "pack_dtype": "int32",
  "meta": {
    "quantizer": [
      "gptqmodel:5.1.0-dev"
    ],
    "uri": "https://github.com/modelcloud/gptqmodel",
    "damp_percent": 0.05,
    "damp_auto_increment": 0.01,
    "static_groups": false,
    "true_sequential": true,
    "mse": 0.0,
    "v2": false,
    "v2_alpha": 0.25,
    "act_group_aware": true
  },
  "pack_impl": "cpu"
}
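A quick way to sanity-check these quantization settings before loading the checkpoint is to read the file directly; a stdlib-only sketch, assuming the working directory is the repository root:

import json
from pathlib import Path

cfg = json.loads(Path("quantize_config.json").read_text())
# Expected for this checkpoint: gptq, 4 bits, group size 32, symmetric.
print(cfg["quant_method"], cfg["bits"], cfg["group_size"], cfg["sym"])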
special_tokens_map.json
ADDED
@@ -0,0 +1,76 @@
{
  "additional_special_tokens": [
    "<code_interpreter>",
    "<commit_after>",
    "<commit_before>",
    "<commit_msg>",
    "<empty_output>",
    "<filename>",
    "<fim_middle>",
    "<fim_pad>",
    "<fim_prefix>",
    "<fim_suffix>",
    "<function_call>",
    "<gh_stars>",
    "]<]speech[>[",
    "]<]image[>[",
    "]<]video[>[",
    "]<]start of speech[>[",
    "]<]end of speech[>[",
    "]<]start of image[>[",
    "]<]end of image[>[",
    "]<]start of video[>[",
    "]<]end of video[>[",
    "]<]vision pad[>[",
    "]~!b[",
    "<issue_closed>",
    "<issue_comment>",
    "<issue_start>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<reponame>",
    "[e~[",
    "]!d~[",
    "]!p~[",
    "]~b]",
    "<jupyter_error>",
    "<add_file>",
    "<delete_file>",
    "<rename_file>",
    "<edit_file>",
    "<commit_message>",
    "<empty_source_file>",
    "<repo_struct>",
    "<code_context>",
    "<file_content>",
    "<source_files>",
    "<pr_start>",
    "<review_comment>",
    "<filepath>",
    "<file_sep>"
  ],
  "bos_token": {
    "content": "]~!b[",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "[e~[",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "[e~[",
  "unk_token": {
    "content": "]!d~[",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
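The map above can be cross-checked against the loaded tokenizer. A short sketch, assuming the checkpoint directory is the current working directory (the "." path is illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)  # "." is illustrative
# These should agree with special_tokens_map.json above.
print(repr(tok.bos_token))  # ']~!b['
print(repr(tok.eos_token))  # '[e~['
print(repr(tok.pad_token))  # '[e~['
print(repr(tok.unk_token))  # ']!d~['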
test_minimax_m2_hf.py
ADDED
@@ -0,0 +1,178 @@
# SPDX-FileCopyrightText: 2024-2025 ModelCloud.ai
# SPDX-FileCopyrightText: 2024-2025 qubitium@modelcloud.ai
# SPDX-License-Identifier: Apache-2.0
# Contact: qubitium@modelcloud.ai, x.com/qubitium

"""
MiniMax-M2 Hugging Face checkpoint sanity check with streaming output.

Usage:
    python test_minimax_m2_hf.py \
        --model-path /monster/data/model/MiniMax-M2-bf16 \
        --question "How many letter A are there in the word Alphabet? Reply with the number only."
"""

from __future__ import annotations

import argparse
import threading
from pathlib import Path

import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# from gptqmodel.hf_minimax_m2.modeling_minimax_m2 import (
#     MiniMaxAttention,
#     MiniMaxDecoderLayer,
#     MiniMaxForCausalLM,
#     MiniMaxMLP,
#     MiniMaxM2Attention,
#     MiniMaxM2DecoderLayer,
#     MiniMaxM2ForCausalLM,
#     MiniMaxM2MLP,
#     MiniMaxM2RMSNorm,
#     MiniMaxM2SparseMoeBlock,
#     MiniMaxRMSNorm,
#     MiniMaxSparseMoeBlock,
# )


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="MiniMax-M2 HF checkpoint smoke test.")
    parser.add_argument(
        "--model-path",
        type=str,
        default="/monster/data/model/MiniMax-M2-bf16",
        help="Path to the MiniMax-M2 Hugging Face checkpoint directory.",
    )
    parser.add_argument(
        "--question",
        type=str,
        default="How many letter A are there in the word Alphabet? Reply with the number only.",
        help="User question to send through the chat template.",
    )
    parser.add_argument(
        "--max-new-tokens",
        type=int,
        default=512,
        help="Maximum number of new tokens to sample from the model.",
    )
    return parser.parse_args()


def build_prompt(tokenizer: AutoTokenizer, question: str) -> str:
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": question},
    ]
    return tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)


# def assert_module_types(model: MiniMaxM2ForCausalLM) -> None:
#     causal_lm_types = (MiniMaxM2ForCausalLM, MiniMaxForCausalLM)
#     decoder_layer_types = (MiniMaxM2DecoderLayer, MiniMaxDecoderLayer)
#     attention_types = (MiniMaxM2Attention, MiniMaxAttention)
#     moe_block_types = (MiniMaxM2SparseMoeBlock, MiniMaxSparseMoeBlock)
#     norm_types = (MiniMaxM2RMSNorm, MiniMaxRMSNorm)
#     mlp_types = (MiniMaxM2MLP, MiniMaxMLP)
#
#     assert isinstance(
#         model, causal_lm_types
#     ), f"Expected MiniMaxM2ForCausalLM/MiniMaxForCausalLM, received {type(model).__name__}"
#
#     decoder = getattr(model, "model", None)
#     assert decoder is not None, "Model is missing the `model` attribute with decoder layers."
#
#     for layer_idx, layer in enumerate(decoder.layers):
#         assert isinstance(
#             layer, decoder_layer_types
#         ), f"Layer {layer_idx}: expected MiniMax(M2)DecoderLayer, got {type(layer).__name__}"
#         assert isinstance(
#             layer.self_attn, attention_types
#         ), f"Layer {layer_idx}: unexpected self_attn type {type(layer.self_attn).__name__}"
#         assert isinstance(
#             layer.block_sparse_moe, moe_block_types
#         ), f"Layer {layer_idx}: unexpected MoE block type {type(layer.block_sparse_moe).__name__}"
#         assert isinstance(
#             layer.input_layernorm, norm_types
#         ), f"Layer {layer_idx}: unexpected input_layernorm type {type(layer.input_layernorm).__name__}"
#         assert isinstance(
#             layer.post_attention_layernorm, norm_types
#         ), f"Layer {layer_idx}: unexpected post_attention_layernorm type {type(layer.post_attention_layernorm).__name__}"
#
#         moe_block = layer.block_sparse_moe
#         assert isinstance(
#             moe_block.experts, nn.ModuleList
#         ), f"Layer {layer_idx}: expected experts to be a ModuleList, got {type(moe_block.experts).__name__}"
#         for expert_idx, expert in enumerate(moe_block.experts):
#             assert isinstance(
#                 expert, mlp_types
#             ), f"Layer {layer_idx} expert {expert_idx}: expected MiniMax(M2)MLP, got {type(expert).__name__}"
#

def main() -> None:
    args = parse_args()
    model_path = Path(args.model_path).expanduser().resolve()

    print(f"Loading tokenizer from {model_path}...")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    print(f"Loading model from {model_path}...")
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        dtype="bfloat16",
        device_map="auto",
        trust_remote_code=True,
    )

    # Uncomment to enforce module type checks.
    # print("Validating module types...")
    # assert_module_types(model)

    prompt = build_prompt(tokenizer, args.question)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    print("Running generation (streaming)...\n")
    streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=False)
    eos_ids = model.generation_config.eos_token_id
    if eos_ids is None:
        eos_ids = []
    elif isinstance(eos_ids, int):
        eos_ids = [eos_ids]
    think_end_id = tokenizer.convert_tokens_to_ids("</think>")
    if think_end_id is not None and think_end_id not in eos_ids:
        eos_ids = eos_ids + [think_end_id]

    generation_kwargs = dict(
        **inputs,
        max_new_tokens=args.max_new_tokens,
        streamer=streamer,
        eos_token_id=eos_ids if eos_ids else None,
    )

    generation_thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    generation_thread.start()

    completion = []
    first_chunk = True
    seen_end_reasoning = False
    for text in streamer:
        if first_chunk:
            print("<think>", end="", flush=True)
            completion.append("<think>")
            first_chunk = False
        print(text, end="", flush=True)
        completion.append(text)
        if "</think>" in text:
            seen_end_reasoning = True

    generation_thread.join()
    print("\n\n=== Completed Response ===")
    final_text = "".join(completion).strip()
    print(final_text or "<empty response>")
    if not seen_end_reasoning:
        print("\n[warning] No </think> token detected in streamed output.", flush=True)


if __name__ == "__main__":
    main()
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7b90ed7f55d905175bc26771d6d7d33b40b46742f073675bc816fedaf482ea1
size 15522763
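The three lines above are a Git LFS pointer, not the tokenizer itself; the real ~15.5 MB file is fetched when the LFS objects are pulled (tokenizer.json is marked with the lfs filter in .gitattributes). A stdlib-only sketch that parses such a pointer:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>"; keep the oid and size fields.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"], "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e7b90ed7f55d905175bc26771d6d7d33b40b46742f073675bc816fedaf482ea1\n"
    "size 15522763\n"
)
print(parse_lfs_pointer(pointer))  # {'oid': 'sha256:e7b9...', 'size': 15522763}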
tokenizer_config.json
ADDED
@@ -0,0 +1,497 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "200000": {
      "content": "]!p~[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200001": {
      "content": "<fim_prefix>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200002": {
      "content": "<fim_middle>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200003": {
      "content": "<fim_suffix>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200004": {
      "content": "<fim_pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200005": {
      "content": "<reponame>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200006": {
      "content": "<filename>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200007": {
      "content": "<gh_stars>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200008": {
      "content": "<issue_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200009": {
      "content": "<issue_comment>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200010": {
      "content": "<issue_closed>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200011": {
      "content": "<jupyter_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200012": {
      "content": "<jupyter_text>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200013": {
      "content": "<jupyter_code>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200014": {
      "content": "<jupyter_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200015": {
      "content": "<empty_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200016": {
      "content": "<commit_before>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200017": {
      "content": "<commit_msg>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200018": {
      "content": "<commit_after>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200019": {
      "content": "]~b]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200020": {
      "content": "[e~[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200021": {
      "content": "]!d~[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200022": {
      "content": "<function_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200023": {
      "content": "<code_interpreter>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200024": {
      "content": "]<]speech[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200025": {
      "content": "]<]image[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200026": {
      "content": "]<]video[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200027": {
      "content": "]<]start of speech[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200028": {
      "content": "]<]end of speech[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200029": {
      "content": "]<]start of image[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200030": {
      "content": "]<]end of image[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200031": {
      "content": "]<]start of video[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200032": {
      "content": "]<]end of video[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200033": {
      "content": "]<]vision pad[>[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200034": {
      "content": "]~!b[",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200035": {
      "content": "<jupyter_error>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200036": {
      "content": "<add_file>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200037": {
      "content": "<delete_file>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200038": {
      "content": "<rename_file>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200039": {
      "content": "<edit_file>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200040": {
      "content": "<commit_message>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200041": {
      "content": "<empty_source_file>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200042": {
      "content": "<repo_struct>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200043": {
      "content": "<code_context>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200044": {
      "content": "<file_content>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200045": {
      "content": "<source_files>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200046": {
      "content": "<pr_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200047": {
      "content": "<review_comment>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200048": {
      "content": "<filepath>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200049": {
      "content": "<file_sep>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200050": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "200051": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "200052": {
      "content": "<minimax:tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "200053": {
      "content": "</minimax:tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<code_interpreter>",
    "<commit_after>",
    "<commit_before>",
    "<commit_msg>",
    "<empty_output>",
    "<filename>",
    "<fim_middle>",
    "<fim_pad>",
    "<fim_prefix>",
    "<fim_suffix>",
    "<function_call>",
    "<gh_stars>",
    "]<]speech[>[",
    "]<]image[>[",
    "]<]video[>[",
    "]<]start of speech[>[",
    "]<]end of speech[>[",
    "]<]start of image[>[",
    "]<]end of image[>[",
    "]<]start of video[>[",
    "]<]end of video[>[",
    "]<]vision pad[>[",
    "]~!b[",
    "<issue_closed>",
    "<issue_comment>",
    "<issue_start>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<reponame>",
    "[e~[",
    "]!d~[",
    "]!p~[",
    "]~b]",
    "<jupyter_error>",
    "<add_file>",
    "<delete_file>",
    "<rename_file>",
    "<edit_file>",
    "<commit_message>",
    "<empty_source_file>",
    "<repo_struct>",
    "<code_context>",
    "<file_content>",
    "<source_files>",
    "<pr_start>",
    "<review_comment>",
    "<filepath>",
    "<file_sep>"
  ],
  "bos_token": "]~!b[",
  "clean_up_tokenization_spaces": false,
  "eos_token": "[e~[",
  "extra_special_tokens": {},
  "model_max_length": 40960000,
  "pad_token": "[e~[",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "]!d~["
}
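Note that ids 200050-200053 (`<think>`, `</think>`, `<minimax:tool_call>`, `</minimax:tool_call>`) are registered with "special": false, so they should encode as single tokens yet survive decoding with skip_special_tokens=True. A sketch of that behavior, assuming the checkpoint directory is the current working directory (the "." path is illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)  # "." is illustrative
ids = tok.encode("<think>plan</think>", add_special_tokens=False)
print(ids[0], ids[-1])  # expected: 200050 200051
# Non-special added tokens are kept even when special tokens are stripped.
print(tok.decode(ids, skip_special_tokens=True))  # expected: "<think>plan</think>"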
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.