fix default generation_config

#23
opened by Iurnem
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/wanx_colorful_black.png filter=lfs diff=lfs merge=lfs -text
LICENSE CHANGED
@@ -9,7 +9,7 @@ By clicking to agree or by using or distributing any portion or element of the T
  b. "We"(or "Us") shall mean Alibaba Cloud.
  c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
  d. "Third Parties" shall mean individuals or legal entities that are not under common control with Us or You.
- e. "Tongyi Qianwen" shall mean the large language models (including Qwen model and Qwen-Chat model), and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Us.
+ e. "Tongyi Qianwen" shall mean the large language models (including Qwen-7B model and Qwen-7B-Chat model), and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Us.
  f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Tongyi Qianwen and Documentation (and any portion thereof) made available under this Agreement.
  g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
  h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation,
NOTICE CHANGED
@@ -49,232 +49,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- ------------- LICENSE FOR stanford_alpaca code --------------
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- ------------- LICENSE FOR PanQiWei AutoGPTQ code --------------
-
- MIT License
-
- Copyright (c) 2023 潘其威(William)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
+ SOFTWARE.
README.md CHANGED
@@ -6,46 +6,40 @@ tags:
  - qwen
  pipeline_tag: text-generation
  inference: false
- license: other
- license_name: tongyi-qianwen-license-agreement
- license_link: https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
  ---

  # Qwen-7B-Chat

  <p align="center">
- <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg" width="400"/>
  <p>
  <br>

  <p align="center">
- 🤗 <a href="https://huggingface.co/Qwen">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href="https://modelscope.cn/organization/qwen">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://arxiv.org/abs/2309.16609">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href="https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary">Demo</a>
- <br>
- <a href="https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://dashscope.aliyun.com">API</a>
  </p>
  <br>

-
  ## 介绍(Introduction)

- **通义千问-7B(Qwen-7B)**是阿里云研发的通义千问大模型系列的70亿参数规模的模型。Qwen-7B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-7B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-7B-Chat。相较于最初开源的Qwen-7B模型,我们现已将预训练模型和Chat模型更新到效果更优的版本。本仓库为Qwen-7B-Chat的仓库。

- 如果您想了解更多关于通义千问-7B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。

- **Qwen-7B** is the 7B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-7B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-7B, we release Qwen-7B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. Now we have updated both our pretrained and chat models with better performance. This repository is the one for Qwen-7B-Chat.

- For more details about Qwen, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository.
- <br>

  ## 要求(Requirements)

  * python 3.8及以上版本
  * pytorch 1.12及以上版本,推荐2.0及以上版本
  * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
  * python 3.8 and above
  * pytorch 1.12 and above, 2.0 and above are recommended
  * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)
- <br>

  ## 依赖项(Dependency)

@@ -54,21 +48,19 @@ For more details about Qwen, please refer to the [GitHub](https://github.com/Qwe
  To run Qwen-7B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.

  ```bash
- pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed
  ```

- 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。

- In addition, it is recommended to install the `flash-attention` library (**we now support flash attention 2**) for higher efficiency and lower memory usage.

  ```bash
- git clone https://github.com/Dao-AILab/flash-attention
  cd flash-attention && pip install .
- # 下方安装可选,安装可能比较缓慢。
- # pip install csrc/layer_norm
- # pip install csrc/rotary
  ```
- <br>

  ## 快速使用(Quickstart)

@@ -81,6 +73,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
  from transformers.generation import GenerationConfig

  # Note: The default behavior now has injection attack prevention off.
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

  # use bf16
@@ -92,8 +86,8 @@ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code
  # use auto mode, automatically select precision based on the device.
  model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()

- # Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this.
- # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参

  # 第一轮对话 1st dialogue turn
  response, history = model.chat(tokenizer, "你好", history=None)
@@ -101,7 +95,7 @@ print(response)
  # 你好!很高兴为你提供帮助。

  # 第二轮对话 2nd dialogue turn
- response, history = model.chat(tokenizer, "给我讲一个年轻人奋斗创业最终取得成功的故事。", history=history)
  print(response)
  # 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
  # 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。
@@ -116,123 +110,23 @@ print(response)
  # 《奋斗创业:一个年轻人的成功之路》
  ```
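Since this discussion concerns the default `generation_config`, it may help to pin the decoding hyperparameters explicitly rather than relying on whichever defaults ship with the checkpoint. A minimal sketch, using the same `GenerationConfig.from_pretrained` call quoted in the diff above; the override values (`top_p`, `max_new_tokens`) are illustrative assumptions, not the repository's defaults:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True
).eval()

# Start from the checkpoint's generation_config.json, then pin selected fields.
gen_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
gen_config.top_p = 0.8           # illustrative value, not the shipped default
gen_config.max_new_tokens = 512  # illustrative value, not the shipped default
model.generation_config = gen_config

response, _ = model.chat(tokenizer, "你好", history=None)
print(response)
```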

- 关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。
-
- For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen).
- <br>
-
- ## Tokenizer
-
- > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。
-
- 基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。
-
- Our tokenizer based on tiktoken is different from other tokenizers, e.g., the sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and its use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md).
- <br>
-
- ## 量化 (Quantization)
-
- ### 用法 (Usage)
-
- **请注意:我们更新量化方案为基于[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)的量化,提供Qwen-7B-Chat的Int4量化模型[点击这里](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4)。相比此前方案,该方案在模型评测效果几乎无损,且存储需求更低,推理速度更优。**
-
- **Note: we provide a new solution based on [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ), and release an Int4 quantized model for Qwen-7B-Chat ([click here](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4)), which achieves nearly lossless model quality with lower memory cost and faster inference speed than the previous solution.**
-
- 以下我们提供示例说明如何使用Int4量化模型。在开始使用前,请先保证满足要求(如torch 2.0及以上,transformers版本为4.32.0及以上,等等),并安装所需安装包:
-
- Here we demonstrate how to use our provided quantized models for inference. Before you start, make sure you meet the requirements of auto-gptq (e.g., torch 2.0 and above, transformers 4.32.0 and above, etc.) and install the required packages:
-
- ```bash
- pip install auto-gptq optimum
- ```
-
- 如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。
-
- 随后即可使用和上述一致的用法调用量化模型:
-
- If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a pre-built wheel.
-
- Then you can load the quantized model easily and run inference just as usual:
-
- ```python
- model = AutoModelForCausalLM.from_pretrained(
-     "Qwen/Qwen-7B-Chat-Int4",
-     device_map="auto",
-     trust_remote_code=True
- ).eval()
- response, history = model.chat(tokenizer, "你好", history=None)
- ```
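For completeness, a self-contained version of the Int4 path described in the removed section above. It assumes the Int4 repository also ships the tokenizer files, as the main repository does, so the tokenizer can be loaded from the quantized checkpoint as well:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the Int4 checkpoint ships the same tokenizer files as the BF16 repo.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat-Int4", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat-Int4",
    device_map="auto",
    trust_remote_code=True,
).eval()

response, history = model.chat(tokenizer, "你好", history=None)
print(response)
```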
-
- ### 效果评测 (Performance Evaluation)
-
- 我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),发现量化模型效果损失较小,结果如下所示:
-
- We illustrate the zero-shot performance of the BF16, Int8 and Int4 models on the benchmark, and we find that the quantized models do not suffer from significant performance degradation. Results are shown below:
-
- | Quantization | MMLU | CEval (val) | GSM8K | Humaneval |
- | ------------- | :--------: | :----------: | :----: | :--------: |
- | BF16 | 55.8 | 59.7 | 50.3 | 37.2 |
- | Int8 | 55.4 | 59.4 | 48.3 | 34.8 |
- | Int4 | 55.1 | 59.2 | 49.7 | 29.9 |
-
- ### 推理速度 (Inference Speed)
-
- 我们测算了不同精度模型以及不同FlashAttn库版本下模型生成2048和8192个token的平均推理速度,如下表所示:
-
- We measured the average inference speed (tokens/s) of generating 2048 and 8192 tokens with different quantization levels and versions of flash-attention, respectively.
-
- | Quantization | FlashAttn | Speed (2048 tokens) | Speed (8192 tokens) |
- | ------------- | :-------: | :------------------:| :------------------:|
- | BF16 | v2 | 40.93 | 36.14 |
- | Int8 | v2 | 37.47 | 32.54 |
- | Int4 | v2 | 50.09 | 38.61 |
- | BF16 | v1 | 40.75 | 35.34 |
- | Int8 | v1 | 37.51 | 32.39 |
- | Int4 | v1 | 45.98 | 36.47 |
- | BF16 | Disabled | 37.55 | 33.56 |
- | Int8 | Disabled | 37.84 | 32.65 |
- | Int4 | Disabled | 48.12 | 36.70 |
-
- 具体而言,我们记录在长度为1的上下文的条件下生成8192个token的性能。评测运行于单张A100-SXM4-80G GPU,使用PyTorch 2.0.1和CUDA 11.8。推理速度是生成8192个token的速度均值。
-
- In detail, the setting of profiling is generating 8192 new tokens with 1 context token. The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.8. The inference speed is averaged over the generated 8192 tokens.
-
- 注意:以上Int4/Int8模型生成速度使用autogptq库给出,当前`AutoModelForCausalLM.from_pretrained`载入的模型生成速度会慢大约20%。我们已经将该问题汇报给HuggingFace团队,若有解决方案将即时更新。
-
- Note: The generation speed of the Int4/Int8 models mentioned above is provided by the autogptq library. The current speed of the model loaded using `AutoModelForCausalLM.from_pretrained` will be approximately 20% slower. We have reported this issue to the HuggingFace team and will update promptly if a solution is available.
-
- ### 显存使用 (GPU Memory Usage)
-
- 我们还测算了不同模型精度编码2048个token及生成8192个token的峰值显存占用情况。(显存消耗在是否使用FlashAttn的情况下均类似。)结果如下所示:
-
- We also profile the peak GPU memory usage for encoding 2048 tokens as context (and generating a single token) and for generating 8192 tokens (with a single token as context) under different quantization levels. (The GPU memory usage is similar whether flash-attention is used or not.) The results are shown below.
-
- | Quantization Level | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens |
- | ------------------ | :---------------------------------: | :-----------------------------------: |
- | BF16 | 16.99GB | 22.53GB |
- | Int8 | 11.20GB | 16.62GB |
- | Int4 | 8.21GB | 13.63GB |
-
- 上述性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。
-
- The above speed and memory profiling are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py).
- <br>

  ## 模型细节(Model)

- 与Qwen-7B预训练模型相同,Qwen-7B-Chat模型规模基本情况如下所示:

- The details of the model architecture of Qwen-7B-Chat are listed as follows:

- | Hyperparameter | Value |
- |:----------------|:------:|
- | n_layers | 32 |
- | n_heads | 32 |
- | d_model | 4096 |
- | vocab size | 151851 |
- | sequence length | 8192 |

  在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
  即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。
@@ -246,7 +140,6 @@ For position encoding, FFN activation function, and normalization calculation me
  For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-7B-Chat uses a vocabulary of over 150K tokens.
  It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary.
  It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.
- <br>

  ## 评测效果(Evaluation)

@@ -262,38 +155,32 @@ Note: Due to rounding errors caused by hardware and framework, differences in re

  #### C-Eval

- 在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-7B-Chat模型的0-shot & 5-shot准确率
-
- We demonstrate the 0-shot & 5-shot accuracy of Qwen-7B-Chat on C-Eval validation set
-
- | Model | Avg. Acc. |
- |:--------------------------------:|:---------:|
- | LLaMA2-7B-Chat | 31.9 |
- | LLaMA2-13B-Chat | 36.2 |
- | LLaMA2-70B-Chat | 44.3 |
- | ChatGLM2-6B-Chat | 52.6 |
- | InternLM-7B-Chat | 53.6 |
- | Baichuan2-7B-Chat | 55.6 |
- | Baichuan2-13B-Chat | 56.7 |
- | Qwen-7B-Chat (original) (0-shot) | 54.2 |
- | **Qwen-7B-Chat (0-shot)** | 59.7 |
- | **Qwen-7B-Chat (5-shot)** | 59.3 |
- | **Qwen-14B-Chat (0-shot)** | 69.8 |
- | **Qwen-14B-Chat (5-shot)** | **71.7** |

  C-Eval测试集上,Qwen-7B-Chat模型的zero-shot准确率结果如下:

  The zero-shot accuracy of Qwen-7B-Chat on C-Eval testing set is provided below:

- | Model | Avg. | STEM | Social Sciences | Humanities | Others |
- | :---------------------- | :------: | :--: | :-------------: | :--------: | :----: |
- | Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 |
- | Chinese-Alpaca-2-7B | 40.3 | - | - | - | - |
- | ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 |
- | Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 |
- | Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 |
- | **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 |
- | **Qwen-14B-Chat** | **69.1** | 65.1 | 80.9 | 71.2 | 63.4 |

  在7B规模模型上,经过人类指令对齐的Qwen-7B-Chat模型,准确率在同类相近规模模型中仍然处于前列。
@@ -303,25 +190,19 @@ Compared with other pretrained models with comparable model size, the human-alig

  #### MMLU

- [MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-7B-Chat模型的 0-shot & 5-shot 准确率如下,效果在同类对齐模型中同样表现较优。

- The 0-shot & 5-shot accuracy of Qwen-7B-Chat on MMLU is provided below.
  The performance of Qwen-7B-Chat still ranks among the top human-aligned models of comparable size.

- | Model | Avg. Acc. |
- |:--------------------------------:|:---------:|
- | ChatGLM2-6B-Chat | 46.0 |
- | LLaMA2-7B-Chat | 46.2 |
- | InternLM-7B-Chat | 51.1 |
- | Baichuan2-7B-Chat | 52.9 |
- | LLaMA2-13B-Chat | 54.6 |
- | Baichuan2-13B-Chat | 57.3 |
- | LLaMA2-70B-Chat | 63.8 |
- | Qwen-7B-Chat (original) (0-shot) | 53.9 |
- | **Qwen-7B-Chat (0-shot)** | 55.8 |
- | **Qwen-7B-Chat (5-shot)** | 57.0 |
- | **Qwen-14B-Chat (0-shot)** | 64.6 |
- | **Qwen-14B-Chat (5-shot)** | **66.5** |

  ### 代码评测(Coding Evaluation)

@@ -329,336 +210,145 @@ Qwen-7B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pas

  The zero-shot Pass@1 of Qwen-7B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below

- | Model | Pass@1 |
- |:-----------------------:|:--------:|
- | ChatGLM2-6B-Chat | 11.0 |
- | LLaMA2-7B-Chat | 12.2 |
- | Baichuan2-7B-Chat | 13.4 |
- | InternLM-7B-Chat | 14.6 |
- | Baichuan2-13B-Chat | 17.7 |
- | LLaMA2-13B-Chat | 18.9 |
- | LLaMA2-70B-Chat | 32.3 |
- | Qwen-7B-Chat (original) | 24.4 |
- | **Qwen-7B-Chat** | 37.2 |
- | **Qwen-14B-Chat** | **43.9** |
-
- ### 数学评测(Mathematics Evaluation)

  在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-7B-Chat的准确率结果如下

  The accuracy of Qwen-7B-Chat on GSM8K is shown below

- | Model | Acc. |
- |:------------------------------------:|:--------:|
- | LLaMA2-7B-Chat | 26.3 |
- | ChatGLM2-6B-Chat | 28.8 |
- | Baichuan2-7B-Chat | 32.8 |
- | InternLM-7B-Chat | 33.0 |
- | LLaMA2-13B-Chat | 37.1 |
- | Baichuan2-13B-Chat | 55.3 |
- | LLaMA2-70B-Chat | 59.3 |
- | **Qwen-7B-Chat (original) (0-shot)** | 41.1 |
- | **Qwen-7B-Chat (0-shot)** | 50.3 |
- | **Qwen-7B-Chat (8-shot)** | 54.1 |
- | **Qwen-14B-Chat (0-shot)** | **60.1** |
- | **Qwen-14B-Chat (8-shot)** | 59.3 |

  ### 长序列评测(Long-Context Understanding)

  通过NTK插值,LogN注意力缩放可以扩展Qwen-7B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-7B-Chat的Rouge-L结果如下:

- **(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**

  We introduce NTK-aware interpolation and LogN attention scaling to extend the context length of Qwen-7B-Chat. The Rouge-L results of Qwen-7B-Chat on the long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (the average length of this dataset is around 15K) are shown below:

  **(To use these tricks, please set `use_dynamic_ntk` and `use_logn_attn` to true in config.json.)**

- | Model | VCSUM (zh) |
- |:------------------|:----------:|
- | GPT-3.5-Turbo-16k | 16.0 |
- | LLama2-7B-Chat | 0.2 |
- | InternLM-7B-Chat | 13.0 |
- | ChatGLM2-6B-Chat | 16.3 |
- | **Qwen-7B-Chat** | **16.6** |

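A minimal sketch of enabling those two flags programmatically instead of editing config.json by hand, using standard `transformers` config handling. It assumes the remote Qwen code reads `use_dynamic_ntk` and `use_logn_attn` from the loaded config at load time, as the note above implies:

```python
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
# Enable NTK-aware interpolation and LogN attention scaling for long inputs.
config.use_dynamic_ntk = True
config.use_logn_attn = True

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat", config=config, device_map="auto", trust_remote_code=True
).eval()
```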
  ### 工具使用能力的评测(Tool Usage)

  #### ReAct Prompting

- 千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在我们开源的、用于评估工具使用能力的评测基准上,千问的表现如下:
-
- Qwen-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework. In our evaluation benchmark for assessing tool usage capabilities, Qwen-Chat's performance is as follows:
-
- <table>
- <tr>
- <th colspan="4" align="center">Chinese Tool-Use Benchmark</th>
- </tr>
- <tr>
- <th align="center">Model</th><th align="center">Tool Selection (Acc.↑)</th><th align="center">Tool Input (Rouge-L↑)</th><th align="center">False Positive Error↓</th>
- </tr>
- <tr>
- <td>GPT-4</td><td align="center">95%</td><td align="center">0.90</td><td align="center">15.0%</td>
- </tr>
- <tr>
- <td>GPT-3.5</td><td align="center">85%</td><td align="center">0.88</td><td align="center">75.0%</td>
- </tr>
- <tr>
- <td>Qwen-7B-Chat</td><td align="center">98%</td><td align="center">0.91</td><td align="center">7.3%</td>
- </tr>
- <tr>
- <td>Qwen-14B-Chat</td><td align="center">98%</td><td align="center">0.93</td><td align="center">2.4%</td>
- </tr>
- </table>

  > 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。

- > The plugins that appear in the evaluation set do not appear in the training set of Qwen. This benchmark evaluates the accuracy of the model in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed into the plugin, and the false positive rate. False Positive: incorrectly invoking a plugin when it should not have been called when responding to a query.

- ![](assets/react_showcase_001.png)
- ![](assets/react_showcase_002.png)

- #### Code Interpreter
-
- 为了考察Qwen使用Python Code Interpreter完成数学解题、数据可视化、及文件处理与爬虫等任务的能力,我们专门建设并开源了一个评测这方面能力的[评测基准](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark)。
-
- 我们发现Qwen在生成代码的可执行率、结果正确性上均表现较好:
-
- To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark).
-
- We have observed that Qwen performs well in terms of code executability and result accuracy when generating code:
-
- <table>
- <tr>
- <th colspan="4" align="center">Executable Rate of Generated Code (%)</th>
- </tr>
- <tr>
- <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization↑</th><th align="center">General↑</th>
- </tr>
- <tr>
- <td>GPT-4</td><td align="center">91.9</td><td align="center">85.9</td><td align="center">82.8</td>
- </tr>
- <tr>
- <td>GPT-3.5</td><td align="center">89.2</td><td align="center">65.0</td><td align="center">74.1</td>
- </tr>
- <tr>
- <td>LLaMA2-7B-Chat</td>
- <td align="center">41.9</td>
- <td align="center">33.1</td>
- <td align="center">24.1</td>
- </tr>
- <tr>
- <td>LLaMA2-13B-Chat</td>
- <td align="center">50.0</td>
- <td align="center">40.5</td>
- <td align="center">48.3</td>
- </tr>
- <tr>
- <td>CodeLLaMA-7B-Instruct</td>
- <td align="center">85.1</td>
- <td align="center">54.0</td>
- <td align="center">70.7</td>
- </tr>
- <tr>
- <td>CodeLLaMA-13B-Instruct</td>
- <td align="center">93.2</td>
- <td align="center">55.8</td>
- <td align="center">74.1</td>
- </tr>
- <tr>
- <td>InternLM-7B-Chat-v1.1</td>
- <td align="center">78.4</td>
- <td align="center">44.2</td>
- <td align="center">62.1</td>
- </tr>
- <tr>
- <td>InternLM-20B-Chat</td>
- <td align="center">70.3</td>
- <td align="center">44.2</td>
- <td align="center">65.5</td>
- </tr>
- <tr>
- <td>Qwen-7B-Chat</td>
- <td align="center">82.4</td>
- <td align="center">64.4</td>
- <td align="center">67.2</td>
- </tr>
- <tr>
- <td>Qwen-14B-Chat</td>
- <td align="center">89.2</td>
- <td align="center">84.1</td>
- <td align="center">65.5</td>
- </tr>
- </table>
-
- <table>
- <tr>
- <th colspan="4" align="center">Accuracy of Code Execution Results (%)</th>
- </tr>
- <tr>
- <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization-Hard↑</th><th align="center">Visualization-Easy↑</th>
- </tr>
- <tr>
- <td>GPT-4</td><td align="center">82.8</td><td align="center">66.7</td><td align="center">60.8</td>
- </tr>
- <tr>
- <td>GPT-3.5</td><td align="center">47.3</td><td align="center">33.3</td><td align="center">55.7</td>
- </tr>
- <tr>
- <td>LLaMA2-7B-Chat</td>
- <td align="center">3.9</td>
- <td align="center">14.3</td>
- <td align="center">39.2</td>
- </tr>
- <tr>
- <td>LLaMA2-13B-Chat</td>
- <td align="center">8.3</td>
- <td align="center">8.3</td>
- <td align="center">40.5</td>
- </tr>
- <tr>
- <td>CodeLLaMA-7B-Instruct</td>
- <td align="center">14.3</td>
- <td align="center">26.2</td>
- <td align="center">60.8</td>
- </tr>
- <tr>
- <td>CodeLLaMA-13B-Instruct</td>
- <td align="center">28.2</td>
- <td align="center">27.4</td>
- <td align="center">62.0</td>
- </tr>
- <tr>
- <td>InternLM-7B-Chat-v1.1</td>
- <td align="center">28.5</td>
- <td align="center">4.8</td>
- <td align="center">40.5</td>
- </tr>
- <tr>
- <td>InternLM-20B-Chat</td>
- <td align="center">34.6</td>
- <td align="center">21.4</td>
- <td align="center">45.6</td>
- </tr>
- <tr>
- <td>Qwen-7B-Chat</td>
- <td align="center">41.9</td>
- <td align="center">40.5</td>
- <td align="center">54.4</td>
- </tr>
- <tr>
- <td>Qwen-14B-Chat</td>
- <td align="center">58.4</td>
- <td align="center">53.6</td>
- <td align="center">59.5</td>
- </tr>
- </table>

- <p align="center">
- <br>
- <img src="assets/code_interpreter_showcase_001.jpg" />
- <br>
- <p>

  #### Huggingface Agent

  千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下:

- Qwen-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). Its performance on the run-mode benchmark provided by HuggingFace is as follows:
-
- <table>
- <tr>
- <th colspan="4" align="center">HuggingFace Agent Benchmark - Run Mode</th>
- </tr>
- <tr>
- <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th>
- </tr>
- <tr>
- <td>GPT-4</td><td align="center">100</td><td align="center">100</td><td align="center">97.4</td>
- </tr>
- <tr>
- <td>GPT-3.5</td><td align="center">95.4</td><td align="center">96.3</td><td align="center">87.0</td>
- </tr>
- <tr>
- <td>StarCoder-Base-15B</td><td align="center">86.1</td><td align="center">87.0</td><td align="center">68.9</td>
- </tr>
- <tr>
- <td>StarCoder-15B</td><td align="center">87.0</td><td align="center">88.0</td><td align="center">68.9</td>
- </tr>
- <tr>
- <td>Qwen-7B-Chat</td><td align="center">87.0</td><td align="center">87.0</td><td align="center">71.5</td>
- </tr>
- <tr>
- <td>Qwen-14B-Chat</td><td align="center">93.5</td><td align="center">94.4</td><td align="center">87.0</td>
- </tr>
- </table>
-
- <table>
- <tr>
- <th colspan="4" align="center">HuggingFace Agent Benchmark - Chat Mode</th>
- </tr>
- <tr>
- <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th>
- </tr>
- <tr>
- <td>GPT-4</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">98.5</td>
- </tr>
- <tr>
- <td>GPT-3.5</td><td align="center">97.3</td><td align="center">96.8</td><td align="center">89.6</td>
- </tr>
- <tr>
- <td>StarCoder-Base-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">91.1</td>
- </tr>
- <tr>
- <td>StarCoder-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">89.6</td>
- </tr>
- <tr>
- <td>Qwen-7B-Chat</td><td align="center">94.7</td><td align="center">94.7</td><td align="center">85.1</td>
- </tr>
- <tr>
- <td>Qwen-14B-Chat</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">95.5</td>
- </tr>
- </table>

- <br>

- ## x86 平台 (x86 Platforms)
- 在酷睿™/至强® 可扩展处理器或 Arc™ GPU 上部署量化模型时,建议使用 [OpenVINO™ Toolkit](https://docs.openvino.ai/2023.3/gen_ai_guide.html)以充分利用硬件,实现更好的推理性能。您可以安装并运行此 [example notebook](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/254-llm-chatbot)。相关问题,您可在[OpenVINO repo](https://github.com/openvinotoolkit/openvino_notebooks/issues)中提交。

- When deploying on Core™/Xeon® Scalable Processors or with an Arc™ GPU, the [OpenVINO™ Toolkit](https://docs.openvino.ai/2023.3/gen_ai_guide.html) is recommended. You can install and run this [example notebook](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/254-llm-chatbot). For related issues, you are welcome to file an issue at the [OpenVINO repo](https://github.com/openvinotoolkit/openvino_notebooks/issues).

- ## FAQ

- 如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。

- If you meet problems, please refer to the [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and existing issues to search for a solution before you launch a new issue.
- <br>

- ## 引用 (Citation)

- 如果你觉得我们的工作对你有帮助,欢迎引用!

- If you find our work helpful, feel free to cite us.

  ```
- @article{qwen,
-   title={Qwen Technical Report},
-   author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
-   journal={arXiv preprint arXiv:2309.16609},
-   year={2023}
- }
- ```
- <br>

  ## 使用协议(License Agreement)

- 我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/qianwen)申请。

- Our code and checkpoints are fully open for research purposes, and commercial use is also allowed. Check the [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/qianwen) to apply.
- <br>

  ## 联系我们(Contact Us)

- 如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件(qianwen_opensource@alibabacloud.com)联系我们。

- If you would like to leave a message for our research or product team, join our Discord or WeChat groups! Also, feel free to send an email to qianwen_opensource@alibabacloud.com.

  - qwen
  pipeline_tag: text-generation
  inference: false
  ---

  # Qwen-7B-Chat

  <p align="center">
+ <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo.jpg" width="400"/>
  <p>
  <br>

  <p align="center">
+ Qwen-7B <a href="https://modelscope.cn/models/qwen/Qwen-7B/summary">🤖 </a> | <a href="https://huggingface.co/Qwen/Qwen-7B">🤗</a>&nbsp | Qwen-7B-Chat <a href="https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary">🤖 </a>| <a href="https://huggingface.co/Qwen/Qwen-7B-Chat">🤗</a>&nbsp | &nbsp<a href="https://modelscope.cn/studios/qwen/Qwen-7B-Chat-Demo/summary">Demo</a>&nbsp | &nbsp<a href="https://github.com/QwenLM/Qwen-7B/blob/main/tech_memo.md">Report</a>
  </p>
  <br>

  ## 介绍(Introduction)

+ **通义千问-7B(Qwen-7B)**是阿里云研发的通义千问大模型系列的70亿参数规模的模型。Qwen-7B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-7B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-7B-Chat。本仓库为Qwen-7B-Chat的仓库。

+ 如果您想了解更多关于通义千问-7B开源模型的细节,我们建议您参阅[Github代码库](https://github.com/QwenLM/Qwen-7B)。

+ **Qwen-7B** is the 7B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-7B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-7B, we release Qwen-7B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-7B-Chat.

+ For more details about the open-source model of Qwen-7B, please refer to the [Github](https://github.com/QwenLM/Qwen-7B) code repository.

  ## 要求(Requirements)

  * python 3.8及以上版本
  * pytorch 1.12及以上版本,推荐2.0及以上版本
  * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项)
+
+
  * python 3.8 and above
  * pytorch 1.12 and above, 2.0 and above are recommended
  * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.)

  ## 依赖项(Dependency)

  To run Qwen-7B-Chat, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries.

  ```bash
+ pip install transformers==4.31.0 accelerate tiktoken einops
  ```

+ 另外,推荐安装`flash-attention`库,以实现更高的效率和更低的显存占用。

+ In addition, it is recommended to install the `flash-attention` library for higher efficiency and lower memory usage.

  ```bash
+ git clone -b v1.0.8 https://github.com/Dao-AILab/flash-attention
  cd flash-attention && pip install .
+ pip install csrc/layer_norm
+ pip install csrc/rotary
  ```
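A quick way to confirm the optional build took effect: a minimal sketch, assuming the wheel installs the module under the name `flash_attn`, and that Qwen simply falls back to its standard attention path when the library is absent, since flash-attention is only recommended, not required:

```python
# Sanity check for the optional flash-attention install.
# Assumption: the installed module is named `flash_attn`.
try:
    import flash_attn  # noqa: F401
    print("flash-attention is available; fused attention kernels can be used.")
except ImportError:
    print("flash-attention not found; Qwen-7B-Chat will run with standard attention.")
```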

  ## 快速使用(Quickstart)

  from transformers.generation import GenerationConfig

  # Note: The default behavior now has injection attack prevention off.
+ # To remove the strategy, you can add `allowed_special`, which accepts the string "all" or a `set` of special tokens.
+ # For example: tokens = tokenizer(text, allowed_special="all")
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

  # use bf16

  # use auto mode, automatically select precision based on the device.
  model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()

+ # Specify hyperparameters for generation
+ model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参

  # 第一轮对话 1st dialogue turn
  response, history = model.chat(tokenizer, "你好", history=None)

  # 你好!很高兴为你提供帮助。

  # 第二轮对话 2nd dialogue turn
+ response, history = model.chat(tokenizer, "给我讲一个年轻人奋斗创业最终取得成功的故事。", history=history)
  print(response)
  # 这是一个关于一个年轻人奋斗创业最终取得成功的故事。
  # 故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。从小,李明就立下了一个目标:要成为一名成功的企业家。

  # 《奋斗创业:一个年轻人的成功之路》
  ```

+ 关于更多的使用说明,请参考我们的[Github repo](https://github.com/QwenLM/Qwen-7B)获取更多信息。

+ For more information, please refer to our [Github repo](https://github.com/QwenLM/Qwen-7B).

  ## 模型细节(Model)

+ 与Qwen-7B预训练模型相同,Qwen-7B-Chat模型规模基本情况如下所示

+ The details of the model architecture of Qwen-7B-Chat are listed as follows

+ | Hyperparameter | Value |
+ |:------|:------|
+ | n_layers | 32 |
+ | n_heads | 32 |
+ | d_model | 4096 |
+ | vocab size | 151851 |
+ | sequence length | 2048 |

  在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法,
  即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。

  For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-7B-Chat uses a vocabulary of over 150K tokens.
  It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary.
  It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization.

  ## 评测效果(Evaluation)

  #### C-Eval

+ 在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-7B-Chat模型的zero-shot准确率
+
+ We demonstrate the zero-shot accuracy of Qwen-7B-Chat on C-Eval validation set
+
+ | Model | Avg. Acc. |
+ |:--------------|:------:|
+ | LLaMA2-7B-Chat | 31.9 |
+ | LLaMA2-13B-Chat | 40.6 |
+ | Chinese-Alpaca-2-7B | 41.3 |
+ | Chinese-Alpaca-Plus-13B | 43.3 |
+ | Baichuan-13B-Chat | 50.4 |
+ | ChatGLM2-6B-Chat | 50.7 |
+ | InternLM-7B-Chat | 53.2 |
+ | **Qwen-7B-Chat** | **54.2** |

  C-Eval测试集上,Qwen-7B-Chat模型的zero-shot准确率结果如下:

  The zero-shot accuracy of Qwen-7B-Chat on C-Eval testing set is provided below:

+ | Model | Avg. | STEM | Social Sciences | Humanities | Others |
+ |:--------------|:------:|:------:|:------:|:------:|:------:|
+ | Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 |
+ | Chinese-Alpaca-2-7B | 40.3 | - | - | - | - |
+ | ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 |
+ | Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 |
+ | **Qwen-7B-Chat** | **54.6** | 47.8 | 67.6 | 59.3 | 50.6 |

  在7B规模模型上,经过人类指令对齐的Qwen-7B-Chat模型,准确率在同类相近规模模型中仍然处于前列。

  #### MMLU

+ [MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-7B-Chat模型的zero-shot准确率如下,效果在同类对齐模型中同样表现较优。

+ The zero-shot accuracy of Qwen-7B-Chat on MMLU is provided below.
  The performance of Qwen-7B-Chat still ranks among the top human-aligned models of comparable size.

+ | Model | Avg. Acc. |
+ |:--------------|:------:|
+ | ChatGLM2-6B-Chat | 45.5 |
+ | LLaMA2-7B-Chat | 47.0 |
+ | InternLM-7B-Chat | 50.8 |
+ | Baichuan-13B-Chat | 52.1 |
+ | ChatGLM2-12B-Chat | 52.1 |
+ | **Qwen-7B-Chat** | **53.9** |

  ### 代码评测(Coding Evaluation)

  The zero-shot Pass@1 of Qwen-7B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below

+ | Model | Pass@1 |
+ |:--------------|:------:|
+ | LLaMA2-7B-Chat | 12.2 |
+ | InternLM-7B-Chat | 14.0 |
+ | Baichuan-13B-Chat | 16.5 |
+ | LLaMA2-13B-Chat | 18.9 |
+ | **Qwen-7B-Chat** | **24.4** |
+
+ ### 数学评测(Mathematics Evaluation)

  在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-7B-Chat的准确率结果如下

  The accuracy of Qwen-7B-Chat on GSM8K is shown below

+ | Model | Zero-shot Acc. | 4-shot Acc. |
+ |:--------------|:------:|:------:|
+ | ChatGLM2-6B-Chat | - | 28.0 |
+ | LLaMA2-7B-Chat | 20.4 | 28.2 |
+ | LLaMA2-13B-Chat | 29.4 | 36.7 |
+ | InternLM-7B-Chat | 32.6 | 34.5 |
+ | Baichuan-13B-Chat | - | 36.3 |
+ | ChatGLM2-12B-Chat | - | 38.1 |
+ | **Qwen-7B-Chat** | **41.1** | **43.5** |

  ### 长序列评测(Long-Context Understanding)

  通过NTK插值,LogN注意力缩放可以扩展Qwen-7B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-7B-Chat的Rouge-L结果如下:

+ **(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)**

  We introduce NTK-aware interpolation, LogN attention scaling to extend the context length of Qwen-7B-Chat. The Rouge-L results of Qwen-7B-Chat on long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (The average length of this dataset is around 15K) are shown below:

  **(To use these tricks, please set `use_dynamic_ntk` and `use_logn_attn` to true in config.json.)**

+ | Model | VCSUM (zh) |
+ |:----------------|:-------:|
+ | GPT-3.5-Turbo-16k | 16.0 |
+ | LLama2-7B-Chat | 0.2 |
+ | InternLM-7B-Chat | 13.0 |
+ | ChatGLM2-6B-Chat | 16.3 |
+ | **Qwen-7B-Chat** | **16.6** |

255
  ### 工具使用能力的评测(Tool Usage)
256
 
257
  #### ReAct Prompting
258
 
259
+ 千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在即将开源的、用于评估工具使用能力的自建评测基准上,千问的表现如下:
260
+
261
+ Qwen-7B-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework. In the soon-to-be-released evaluation benchmark for assessing tool usage capabilities, Qwen-7B-Chat's performance is as follows:
262
+
263
+ | Model | Tool Selection (Acc.↑) | Tool Input (Rouge-L↑) | False Positive Error↓ |
264
+ |:-----------------|:----------------------:|:---------------------:|:---------------------:|
265
+ | GPT-4 | 95% | **0.90** | 15% |
266
+ | GPT-3.5 | 85% | 0.88 | 75% |
267
+ | **Qwen-7B-Chat** | **99%** | 0.89 | **8.5%** |
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
 
269
  > 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。
270
 
271
+ > The plugins that appear in the evaluation set do not appear in the training set of Qwen-7B-Chat. This benchmark evaluates the model's accuracy in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed to the plugin, and the false positive rate. False positive: incorrectly invoking a plugin when responding to a query that should not require one.
272
 
273
+ 关于 ReAct Prompting 的 prompt 怎么写、怎么使用,请参考 [ReAct 样例说明](examples/react_prompt.md)。使用工具能使模型更好地完成任务。基于千问的工具使用能力,我们能实现下图所展示的效果:
 
274
 
275
+ For how to write and use prompts for ReAct Prompting, please refer to [the ReAct examples](examples/react_prompt.md). The use of tools can enable the model to better perform tasks, as shown in the following figures:
 
276
 
277
+ ![](assets/react_showcase_001.png)
278
+ ![](assets/react_showcase_002.png)
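+
+ To give a flavor of the format, here is a heavily simplified ReAct-style sketch; the real template and tool descriptions live in [examples/react_prompt.md](examples/react_prompt.md), the `search` tool below is hypothetical, and `model`/`tokenizer` are assumed to be loaded as in the quickstart:
+
+ ```python
+ TOOLS = "search: useful for looking up facts. Input should be a search query."
+
+ prompt = f"""Answer the following questions as best you can. You have access to the following tools:
+
+ {TOOLS}
+
+ Use the following format:
+
+ Question: the input question
+ Thought: think about what to do next
+ Action: the tool to use
+ Action Input: the input to the tool
+ Observation: the result of the action
+ ... (Thought/Action/Action Input/Observation can repeat)
+ Thought: I now know the final answer
+ Final Answer: the final answer
+
+ Question: What is the weather like in Beijing today?"""
+
+ # The caller is expected to stop generation at "Observation:", run the
+ # requested tool, append the real observation, and continue generating.
+ response, history = model.chat(tokenizer, prompt, history=None)
+ print(response)
+ ```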
 
 
 
279
 
280
  #### Huggingface Agent
281
 
282
  千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下:
283
 
284
+ Qwen-7B-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). Its performance on the run-mode benchmark provided by HuggingFace is as follows:
 
285
 
286
+ | Model | Tool Selection↑ | Tool Used↑ | Code↑ |
287
+ |:-|:-:|:-:|:-:|
288
+ |GPT-4 | **100** | **100** | **97.41** |
289
+ |GPT-3.5 | 95.37 | 96.30 | 87.04 |
290
+ |StarCoder-15.5B | 87.04 | 87.96 | 68.89 |
291
+ | **Qwen-7B** | 90.74 | 92.59 | 74.07 |
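+
+ A minimal sketch of plugging a locally loaded Qwen-7B-Chat into the transformers agent API (assuming a transformers version that ships `LocalAgent`, with `model` and `tokenizer` loaded as in the quickstart):
+
+ ```python
+ from transformers import LocalAgent
+
+ # LocalAgent wraps a local causal LM so that agent.run() can plan tool calls
+ agent = LocalAgent(model, tokenizer)
+ agent.run("Draw me a picture of rivers and lakes.")
+ ```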
292
 
293
+ ## 量化(Quantization)
 
294
 
295
+ 如希望使用更低精度的量化模型,如4比特和8比特的模型,我们提供了简单的示例来说明如何快速使用量化模型。在开始前,确保你已经安装了`bitsandbytes`。请注意,`bitsandbytes`的安装要求是:
296
 
297
+ We provide examples to show how to load models in `NF4` and `Int8`. Before you start, make sure you have installed `bitsandbytes`. Note that the requirements for `bitsandbytes` are:
298
 
299
+ ```
300
+ **Requirements** Python >=3.8. Linux distribution (Ubuntu, MacOS, etc.) + CUDA > 10.0.
301
+ ```
302
 
303
+ Windows用户需安装特定版本的`bitsandbytes`,可选项包括[bitsandbytes-windows-webui](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)
 
304
 
305
+ Windows users should look for an alternative build of `bitsandbytes`, such as [bitsandbytes-windows-webui](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels).
306
 
307
+ 你只需要在`AutoModelForCausalLM.from_pretrained`中添加你的量化配置,即可使用量化模型。如下所示:
308
 
309
+ Then you only need to add your quantization configuration to `AutoModelForCausalLM.from_pretrained`. See the example below:
310
 
311
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+ # quantization configuration for NF4 (4 bits)
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type='nf4',
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ # quantization configuration for Int8 (8 bits); keep whichever of the two you need
+ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+
+ model = AutoModelForCausalLM.from_pretrained(
+     "Qwen/Qwen-7B-Chat",
+     device_map="cuda:0",
+     quantization_config=quantization_config,
+     trust_remote_code=True,
+ ).eval()
  ```
332
+
333
+ 上述方法可以让我们将模型量化成`NF4`和`Int8`精度的模型进行读取,帮助我们节省显存开销。我们也提供了相关性能数据。我们发现尽管模型在效果上存在损失,但模型的显存开销大幅降低。
334
+
335
+ With this method, you can load Qwen-7B-Chat in `NF4` and `Int8`, which saves memory. We provide the related model performance statistics below. We find that quantization degrades the effectiveness slightly but significantly reduces memory costs.
336
+
337
+ | Precision | MMLU | Memory |
338
+ | :---------| :-------: | :-----: |
339
+ | BF16 | 56.7 | 16.2G |
340
+ | Int8 | 52.8 | 10.1G |
341
+ | NF4 | 48.9 | 7.4G |
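+
+ If you want to sanity-check the memory column on your own hardware, one way is to read PyTorch's peak-memory counter after a generation (a rough illustration; the table above may have been measured differently):
+
+ ```python
+ import torch
+
+ torch.cuda.reset_peak_memory_stats()
+ response, _ = model.chat(tokenizer, "你好", history=None)
+ print(f"peak GPU memory: {torch.cuda.max_memory_allocated() / 1024**3:.1f} GiB")
+ ```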
342
 
343
  ## 使用协议(License Agreement)
344
 
345
+ 我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](LICENSE)了解具体的开源协议细节。
346
 
347
+ Our code and checkpoints are open for research purposes, and commercial use is allowed as well. Check [LICENSE](LICENSE) for more details about the license.
 
348
 
349
  ## 联系我们(Contact Us)
350
 
351
+ 如果你想给我们的研发团队和产品团队留言,请通过邮件(qianwen_opensource@alibabacloud.com)联系我们。
352
 
353
+ If you are interested in leaving a message for either our research team or product team, feel free to send an email to qianwen_opensource@alibabacloud.com.
354
 
assets/code_interpreter_showcase_001.jpg DELETED
Binary file (138 kB)
 
assets/logo.jpg CHANGED
assets/qwen_tokenizer.png ADDED
assets/react_tutorial_001.png ADDED
assets/react_tutorial_002.png ADDED
assets/tokenizer.pdf ADDED
Binary file (24.7 kB). View file
 
assets/tokenizer.png ADDED
model-00001-of-00008.safetensors → assets/wanx_colorful_black.png RENAMED
File without changes
assets/wechat.png DELETED
Binary file (68.4 kB)
 
cache_autogptq_cuda_256.cpp DELETED
@@ -1,198 +0,0 @@
1
- #include <torch/all.h>
2
- #include <torch/python.h>
3
- #include <c10/cuda/CUDAGuard.h>
4
-
5
- // adapted from https://github.com/PanQiWei/AutoGPTQ/blob/main/autogptq_extension/cuda_256/autogptq_cuda_256.cpp
6
- void vecquant8matmul_cuda(
7
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
8
- torch::Tensor scales, torch::Tensor zeros,
9
- torch::Tensor g_idx
10
- );
11
-
12
- void vecquant8matmul(
13
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
14
- torch::Tensor scales, torch::Tensor zeros,
15
- torch::Tensor g_idx
16
- ) {
17
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
18
- vecquant8matmul_cuda(vec, mat, mul, scales, zeros, g_idx);
19
- }
20
-
21
- void vecquant8matmul_batched_cuda(
22
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
23
- torch::Tensor scales, torch::Tensor zeros
24
- );
25
-
26
- void vecquant8matmul_batched(
27
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
28
- torch::Tensor scales, torch::Tensor zeros
29
- ) {
30
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
31
- vecquant8matmul_batched_cuda(vec, mat, mul, scales, zeros);
32
- }
33
-
34
- void vecquant8matmul_batched_column_compression_cuda(
35
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
36
- torch::Tensor scales, torch::Tensor zeros
37
- );
38
-
39
- void vecquant8matmul_batched_column_compression(
40
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
41
- torch::Tensor scales, torch::Tensor zeros
42
- ) {
43
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
44
- vecquant8matmul_batched_column_compression_cuda(vec, mat, mul, scales, zeros);
45
- }
46
-
47
- void vecquant4matmul_batched_cuda(
48
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
49
- torch::Tensor scales, torch::Tensor zeros
50
- );
51
-
52
- void vecquant4matmul_batched(
53
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
54
- torch::Tensor scales, torch::Tensor zeros
55
- ) {
56
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
57
- vecquant4matmul_batched_cuda(vec, mat, mul, scales, zeros);
58
- }
59
-
60
- void vecquant4matmul_batched_column_compression_cuda(
61
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
62
- torch::Tensor scales, torch::Tensor zeros
63
- );
64
-
65
- void vecquant4matmul_batched_column_compression(
66
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
67
- torch::Tensor scales, torch::Tensor zeros
68
- ) {
69
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
70
- vecquant4matmul_batched_column_compression_cuda(vec, mat, mul, scales, zeros);
71
- }
72
-
73
- void vecquant8matmul_batched_old_cuda(
74
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
75
- torch::Tensor scales, torch::Tensor zeros
76
- );
77
-
78
- void vecquant8matmul_batched_old(
79
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
80
- torch::Tensor scales, torch::Tensor zeros
81
- ) {
82
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
83
- vecquant8matmul_batched_old_cuda(vec, mat, mul, scales, zeros);
84
- }
85
-
86
-
87
- void vecquant4matmul_batched_old_cuda(
88
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
89
- torch::Tensor scales, torch::Tensor zeros
90
- );
91
-
92
- void vecquant4matmul_batched_old(
93
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
94
- torch::Tensor scales, torch::Tensor zeros
95
- ) {
96
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
97
- vecquant4matmul_batched_old_cuda(vec, mat, mul, scales, zeros);
98
- }
99
-
100
- void vecquant8matmul_batched_column_compression_old_cuda(
101
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
102
- torch::Tensor scales, torch::Tensor zeros
103
- );
104
-
105
- void vecquant8matmul_batched_column_compression_old(
106
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
107
- torch::Tensor scales, torch::Tensor zeros
108
- ) {
109
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
110
- vecquant8matmul_batched_column_compression_old_cuda(vec, mat, mul, scales, zeros);
111
- }
112
-
113
- void vecquant4matmul_batched_column_compression_old_cuda(
114
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
115
- torch::Tensor scales, torch::Tensor zeros
116
- );
117
-
118
- void vecquant4matmul_batched_column_compression_old(
119
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
120
- torch::Tensor scales, torch::Tensor zeros
121
- ) {
122
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
123
- vecquant4matmul_batched_column_compression_old_cuda(vec, mat, mul, scales, zeros);
124
- }
125
-
126
-
127
-
128
- void vecquant8matmul_batched_faster_cuda(
129
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
130
- torch::Tensor scales, torch::Tensor zeros
131
- );
132
-
133
- void vecquant8matmul_batched_faster(
134
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
135
- torch::Tensor scales, torch::Tensor zeros
136
- ) {
137
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
138
- vecquant8matmul_batched_faster_cuda(vec, mat, mul, scales, zeros);
139
- }
140
-
141
-
142
- void vecquant8matmul_batched_faster_old_cuda(
143
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
144
- torch::Tensor scales, torch::Tensor zeros
145
- );
146
-
147
- void vecquant8matmul_batched_faster_old(
148
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
149
- torch::Tensor scales, torch::Tensor zeros
150
- ) {
151
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
152
- vecquant8matmul_batched_faster_old_cuda(vec, mat, mul, scales, zeros);
153
- }
154
-
155
- void vecquant8matmul_batched_column_compression_faster_cuda(
156
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
157
- torch::Tensor scales, torch::Tensor zeros
158
- );
159
-
160
- void vecquant8matmul_batched_column_compression_faster(
161
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
162
- torch::Tensor scales, torch::Tensor zeros
163
- ) {
164
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
165
- vecquant8matmul_batched_column_compression_faster_cuda(vec, mat, mul, scales, zeros);
166
- }
167
-
168
-
169
- void vecquant8matmul_batched_column_compression_faster_old_cuda(
170
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
171
- torch::Tensor scales, torch::Tensor zeros
172
- );
173
-
174
- void vecquant8matmul_batched_column_compression_faster_old(
175
- torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
176
- torch::Tensor scales, torch::Tensor zeros
177
- ) {
178
- const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));
179
- vecquant8matmul_batched_column_compression_faster_old_cuda(vec, mat, mul, scales, zeros);
180
- }
181
-
182
-
183
-
184
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
185
- m.def("vecquant8matmul", &vecquant8matmul, "Vector 8-bit Quantized Matrix Multiplication (CUDA) (desc_act)");
186
- m.def("vecquant8matmul_batched", &vecquant8matmul_batched, "Vector 8-bit Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
187
- m.def("vecquant8matmul_batched_old", &vecquant8matmul_batched_old, "Vector 8-bit old Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
188
- m.def("vecquant8matmul_batched_faster", &vecquant8matmul_batched_faster, "Vector 8-bit old Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
189
- m.def("vecquant8matmul_batched_faster_old", &vecquant8matmul_batched_faster_old, "Vector 8-bit old Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
190
- m.def("vecquant4matmul_batched_old", &vecquant4matmul_batched_old, "Vector 4-bit old Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
191
- m.def("vecquant8matmul_batched_column_compression", &vecquant8matmul_batched_column_compression, "Vector 8-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
192
- m.def("vecquant8matmul_batched_column_compression_old", &vecquant8matmul_batched_column_compression_old, "Vector old 8-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
193
- m.def("vecquant8matmul_batched_column_compression_faster", &vecquant8matmul_batched_column_compression_faster, "Vector old 8-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
194
- m.def("vecquant8matmul_batched_column_compression_faster_old", &vecquant8matmul_batched_column_compression_faster_old, "Vector old 8-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
195
- m.def("vecquant4matmul_batched_column_compression_old", &vecquant4matmul_batched_column_compression_old, "Vector old 4-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
196
- m.def("vecquant4matmul_batched", &vecquant4matmul_batched, "Vector 4-bit Batched Quantized Matrix Multiplication (CUDA) (desc_act)");
197
- m.def("vecquant4matmul_batched_column_compression", &vecquant4matmul_batched_column_compression, "Vector 4-bit Batched Quantized Matrix Multiplication (CUDA) with weight's column compressed (desc_act)");
198
- }
 
cache_autogptq_cuda_kernel_256.cu DELETED
@@ -1,1708 +0,0 @@
1
- #define _CRT_SECURE_NO_WARNINGS
2
- #include <torch/all.h>
3
- #include <torch/python.h>
4
- #include <cuda.h>
5
- #include <cuda_runtime.h>
6
- #include <cuda_fp16.h>
7
- #include <stdint.h>
8
-
9
- #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700) || defined(USE_ROCM)
10
- // adapted from https://github.com/PanQiWei/AutoGPTQ/blob/main/autogptq_extension/cuda_256/autogptq_cuda_kernel_256.cu
11
- __device__ __forceinline__ void atomicAdd(c10::Half* address, c10::Half val) {
12
- unsigned int *address_as_ui = reinterpret_cast<unsigned int *>(reinterpret_cast<char *>(address) - (reinterpret_cast<size_t>(address) & 2));
13
- unsigned int old = *address_as_ui;
14
- unsigned int assumed;
15
-
16
- do {
17
- assumed = old;
18
- unsigned short hsum = reinterpret_cast<size_t>(address) & 2 ? (old >> 16) : (old & 0xffff);
19
- hsum += val;
20
- old = reinterpret_cast<size_t>(address) & 2
21
- ? (old & 0xffff) | (hsum << 16)
22
- : (old & 0xffff0000) | hsum;
23
- old = atomicCAS(address_as_ui, assumed, old);
24
-
25
- // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
26
- } while (assumed != old);
27
- }
28
- __device__ __forceinline__ void atomicAdd(__half* address, c10::Half val) {
29
- unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
30
- unsigned int old = *address_as_ui;
31
- unsigned int assumed;
32
-
33
- do {
34
- assumed = old;
35
- __half_raw hsum;
36
- hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
37
- half tmpres = __hadd(hsum, val);
38
- hsum = __half_raw(tmpres);
39
- old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
40
- old = atomicCAS(address_as_ui, assumed, old);
41
- } while (assumed != old);
42
- }
43
- #endif
44
-
45
- template <typename scalar_t>
46
- __global__ void VecQuant8MatMulKernel(
47
- const scalar_t* __restrict__ vec,
48
- const int* __restrict__ mat,
49
- scalar_t* __restrict__ mul,
50
- const scalar_t* __restrict__ scales,
51
- const int* __restrict__ zeros,
52
- const int* __restrict__ g_idx,
53
- int batch,
54
- int vec_height,
55
- int height,
56
- int width,
57
- int zero_width
58
- );
59
-
60
- template <typename scalar_t>
61
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel(
62
- const scalar_t* __restrict__ vec,
63
- const int* __restrict__ mat,
64
- scalar_t* __restrict__ mul,
65
- const scalar_t* __restrict__ scales,
66
- const int* __restrict__ zeros,
67
- int batch,
68
- int heads,
69
- int vec_row,
70
- int height,
71
- int width
72
- );
73
-
74
- template <typename scalar_t>
75
- __global__ void VecQuant4BatchMatMulColumnCompressionKernel(
76
- const scalar_t* __restrict__ vec,
77
- const int* __restrict__ mat,
78
- scalar_t* __restrict__ mul,
79
- const scalar_t* __restrict__ scales,
80
- const int* __restrict__ zeros,
81
- int batch,
82
- int heads,
83
- int vec_row,
84
- int height,
85
- int width
86
- );
87
-
88
- template <typename scalar_t>
89
- __global__ void VecQuant8BatchMatMulKernel(
90
- const scalar_t* __restrict__ vec,
91
- const int* __restrict__ mat,
92
- scalar_t* __restrict__ mul,
93
- const scalar_t* __restrict__ scales,
94
- const int* __restrict__ zeros,
95
- int batch,
96
- int heads,
97
- int vec_row,
98
- int vec_height,
99
- int height,
100
- int width,
101
- int zero_width
102
- );
103
-
104
- template <typename scalar_t>
105
- __global__ void VecQuant4BatchMatMulKernel(
106
- const scalar_t* __restrict__ vec,
107
- const int* __restrict__ mat,
108
- scalar_t* __restrict__ mul,
109
- const scalar_t* __restrict__ scales,
110
- const int* __restrict__ zeros,
111
- int batch,
112
- int heads,
113
- int vec_row,
114
- int vec_height,
115
- int height,
116
- int width,
117
- int zero_width
118
- );
119
-
120
-
121
-
122
- template <typename scalar_t>
123
- __global__ void VecQuant8BatchMatMulKernel_old(
124
- const scalar_t* __restrict__ vec,
125
- const uint8_t* __restrict__ mat,
126
- scalar_t* __restrict__ mul,
127
- const scalar_t* __restrict__ scales,
128
- const scalar_t* __restrict__ zeros,
129
- int batch,
130
- int heads,
131
- int vec_row,
132
- int vec_height,
133
- int height,
134
- int width,
135
- int zero_width
136
- );
137
-
138
- __global__ void VecQuant8BatchMatMulKernel_faster(
139
- const half* __restrict__ vec,
140
- const uint8_t* __restrict__ mat,
141
- half* __restrict__ mul,
142
- const half* __restrict__ scales,
143
- const half* __restrict__ zeros,
144
- int batch,
145
- int heads,
146
- int vec_row,
147
- int vec_height,
148
- int height,
149
- int width,
150
- int zero_width
151
- );
152
-
153
-
154
-
155
- __global__ void VecQuant8BatchMatMulKernel_faster_old(
156
- const half* __restrict__ vec,
157
- const uint8_t* __restrict__ mat,
158
- half* __restrict__ mul,
159
- const half* __restrict__ scales,
160
- const half* __restrict__ zeros,
161
- int batch,
162
- int heads,
163
- int vec_row,
164
- int vec_height,
165
- int height,
166
- int width
167
- );
168
-
169
-
170
- template <typename scalar_t>
171
- __global__ void VecQuant4BatchMatMulKernel_old(
172
- const scalar_t* __restrict__ vec,
173
- const uint8_t* __restrict__ mat,
174
- scalar_t* __restrict__ mul,
175
- const scalar_t* __restrict__ scales,
176
- const scalar_t* __restrict__ zeros,
177
- int batch,
178
- int heads,
179
- int vec_row,
180
- int vec_height,
181
- int height,
182
- int width,
183
- int zero_width
184
- );
185
-
186
-
187
- template <typename scalar_t>
188
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_old(
189
- const scalar_t* __restrict__ vec,
190
- const uint8_t* __restrict__ mat,
191
- scalar_t* __restrict__ mul,
192
- const scalar_t* __restrict__ scales,
193
- const scalar_t* __restrict__ zeros,
194
- int batch,
195
- int heads,
196
- int vec_row,
197
- int height,
198
- int width
199
- );
200
-
201
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_faster(
202
- const half* __restrict__ vec,
203
- const uint8_t* __restrict__ mat,
204
- half* __restrict__ mul,
205
- const half* __restrict__ scales,
206
- const half* __restrict__ zeros,
207
- int batch,
208
- int heads,
209
- int vec_row,
210
- int height,
211
- int width
212
- );
213
-
214
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_faster_old(
215
- const half* __restrict__ vec,
216
- const uint8_t* __restrict__ mat,
217
- half* __restrict__ mul,
218
- const half* __restrict__ scales,
219
- const half* __restrict__ zeros,
220
- int batch,
221
- int heads,
222
- int vec_row,
223
- int height,
224
- int width
225
- );
226
-
227
-
228
- template <typename scalar_t>
229
- __global__ void VecQuant4BatchMatMulColumnCompressionKernel_old(
230
- const scalar_t* __restrict__ vec,
231
- const uint8_t* __restrict__ mat,
232
- scalar_t* __restrict__ mul,
233
- const scalar_t* __restrict__ scales,
234
- const scalar_t* __restrict__ zeros,
235
- int batch,
236
- int heads,
237
- int vec_row,
238
- int height,
239
- int width
240
- );
241
-
242
-
243
- __global__ void VecQuant8BatchMatMulKernel_faster(
244
- const half* __restrict__ vec,
245
- const uint8_t* __restrict__ mat,
246
- half* __restrict__ mul,
247
- const half* __restrict__ scales,
248
- const half* __restrict__ zeros,
249
- int batch,
250
- int heads,
251
- int vec_row,
252
- int vec_height,
253
- int height,
254
- int width
255
- );
256
-
257
-
258
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_faster(
259
- const half* __restrict__ vec,
260
- const uint8_t* __restrict__ mat,
261
- half* __restrict__ mul,
262
- const half* __restrict__ scales,
263
- const half* __restrict__ zeros,
264
- int batch,
265
- int heads,
266
- int vec_row,
267
- int height,
268
- int width
269
- );
270
-
271
- const int BLOCKWIDTH = 128;
272
- const int BLOCKHEIGHT8 = 32;
273
- const int BLOCKHEIGHT4 = 16;
274
- const int BLOCKHEIGHT_OLD4 = 128;
275
- //const int BLOCKHEIGHT_OLD8 = 128;
276
-
277
- __device__ inline unsigned int as_unsigned(int i) {
278
- return *reinterpret_cast<unsigned int*>(&i);
279
- }
280
-
281
- __device__ inline int as_int(int i) {
282
- return *reinterpret_cast<int*>(&i);
283
- }
284
-
285
- void vecquant8matmul_batched_column_compression_cuda(
286
- torch::Tensor vec,
287
- torch::Tensor mat,
288
- torch::Tensor mul,
289
- torch::Tensor scales,
290
- torch::Tensor zeros
291
- ) {
292
- int batch = vec.size(0);
293
- int heads = vec.size(1);
294
- int vec_row = vec.size(2);
295
- int height = vec.size(3);
296
- int width = mat.size(3) * 4;
297
-
298
- dim3 blocks(
299
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
300
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
301
- );
302
- dim3 threads(BLOCKWIDTH);
303
-
304
- AT_DISPATCH_FLOATING_TYPES(
305
- vec.type(), "vecquant8matmul_batched_cuda", ([&] {
306
- VecQuant8BatchMatMulColumnCompressionKernel<<<blocks, threads>>>(
307
- vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
308
- scales.data<scalar_t>(), zeros.data<int>(),
309
- batch, heads, vec_row, height, width
310
- );
311
- })
312
- );
313
-
314
- }
315
-
316
- template <typename scalar_t>
317
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel(
318
- const scalar_t* __restrict__ vec,
319
- const int* __restrict__ mat,
320
- scalar_t* __restrict__ mul,
321
- const scalar_t* __restrict__ scales,
322
- const int* __restrict__ zeros,
323
- int batch,
324
- int heads,
325
- int vec_row,
326
- int height,
327
- int width
328
- ) {
329
- int weight_total = batch * heads * height * width / 4;
330
- int input_total = batch * heads * vec_row * height;
331
- int out_total = batch * heads * vec_row * width;
332
- int tid = threadIdx.x;
333
- // h is index of height with step being BLOCKWIDTH
334
- int h = BLOCKWIDTH * blockIdx.x;
335
- // w is index of width with step being 1
336
- int w = BLOCKWIDTH * blockIdx.y + tid;
337
- if (w >= width && tid >= height) {
338
- return;
339
- }
340
-
341
- __shared__ scalar_t blockvec[BLOCKWIDTH];
342
- int k;
343
- scalar_t w_tmp;
344
-
345
- float weight[BLOCKWIDTH];
346
-
347
- for (int b = 0; b < batch; ++b){
348
- for (int head = 0; head < heads; ++head){
349
- int batch_shift = b * heads + head;
350
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
351
- int i_w = (w / 4);
352
- int w_bit = (w % 4) * 8;
353
-
354
- int w_index = (batch_shift * height + h + k) * width / 4 + i_w;
355
- if (w_index >= weight_total || w >= width) {
356
- weight[k] = 0;
357
- } else {
358
- scalar_t scale = scales[batch_shift * height + h + k];
359
- scalar_t zero = zeros[batch_shift * height + h + k];
360
- w_tmp = ((as_unsigned(mat[w_index]) >> w_bit) & 0xFF);
361
- weight[k] = scale * (w_tmp - zero);
362
- }
363
- }
364
-
365
- scalar_t res;
366
- for (int vr = 0; vr < vec_row; ++vr){
367
- res = 0;
368
- int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
369
- if (vec_index < input_total) {
370
- blockvec[tid] = vec[vec_index];
371
- } else {
372
- blockvec[tid] = 0;
373
- }
374
-
375
- __syncthreads();
376
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
377
- // res is the dot product of BLOCKWIDTH elements (part of width)
378
- res += weight[k] * blockvec[k];
379
- }
380
- // add res to the final result, final matrix shape: (batch, vec_row, width)
381
- int out_index = (batch_shift * vec_row + vr) * width + w;
382
- if (out_index < out_total) {
383
- atomicAdd(&mul[out_index], res);
384
- }
385
- __syncthreads();
386
- }
387
- }
388
- }
389
- }
390
-
391
- void vecquant8matmul_batched_cuda(
392
- torch::Tensor vec,
393
- torch::Tensor mat,
394
- torch::Tensor mul,
395
- torch::Tensor scales,
396
- torch::Tensor zeros
397
- ) {
398
- int batch = vec.size(0);
399
- int heads = vec.size(1);
400
- int vec_row = vec.size(2);
401
- int vec_height = vec.size(3);
402
- int height = mat.size(2);
403
- int width = mat.size(3);
404
- int zero_width = zeros.size(2);
405
-
406
- dim3 blocks(
407
- (height + BLOCKHEIGHT8 - 1) / BLOCKHEIGHT8,
408
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
409
- );
410
- dim3 threads(BLOCKWIDTH);
411
-
412
- AT_DISPATCH_FLOATING_TYPES(
413
- vec.type(), "vecquant8matmul_batched_cuda", ([&] {
414
- VecQuant8BatchMatMulKernel<<<blocks, threads>>>(
415
- vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
416
- scales.data<scalar_t>(), zeros.data<int>(),
417
- batch, heads, vec_row, vec_height, height, width, zero_width
418
- );
419
- })
420
- );
421
-
422
- }
423
-
424
- template <typename scalar_t>
425
- __global__ void VecQuant8BatchMatMulKernel(
426
- const scalar_t* __restrict__ vec,
427
- const int* __restrict__ mat,
428
- scalar_t* __restrict__ mul,
429
- const scalar_t* __restrict__ scales,
430
- const int* __restrict__ zeros,
431
- int batch,
432
- int heads,
433
- int vec_row,
434
- int vec_height,
435
- int height,
436
- int width,
437
- int zero_width
438
- ) {
439
- int weight_total = batch * heads * height * width;
440
- int input_total = batch * heads * vec_row * vec_height;
441
- int out_total = batch * heads * vec_row * width;
442
- int tid = threadIdx.x;
443
- // h is index of height with step being BLOCKHEIGHT8
444
- int h = BLOCKHEIGHT8 * blockIdx.x;
445
- // w is index of width with step being 1
446
- int w = BLOCKWIDTH * blockIdx.y + tid;
447
- if (w >= width && tid >= vec_height) {
448
- return;
449
- }
450
-
451
- __shared__ scalar_t blockvec[BLOCKWIDTH];
452
- // i is index of mat of block first row
453
- int i = width * h + w;
454
- // if (i >= width * height) {
455
- // return;
456
- // }
457
- int k;
458
- scalar_t w_tmp;
459
-
460
- int z_w = w / 4;
461
- int z_mod = (w % 4) * 8;
462
-
463
- float weight[BLOCKWIDTH];
464
-
465
- for (int b = 0; b < batch; ++b){
466
- for (int head = 0; head < heads; ++head){
467
- int batch_shift = b * heads + head;
468
- for (k = 0; k < BLOCKWIDTH && h * 4 + k < vec_height; ++k){
469
- int k_w = (k / 4);
470
- int k_bit = (k % 4) * 8;
471
-
472
- int w_index = batch_shift * height * width + i + (k_w * width);
473
- if (w_index >= weight_total || w >= width) {
474
- weight[k] = 0;
475
- } else {
476
- scalar_t scale = scales[batch_shift * width + w];
477
- scalar_t zero;
478
- if (zero_width == width) {
479
- zero = zeros[batch_shift * width + w];
480
- } else {
481
- zero = scalar_t(((as_unsigned(zeros[batch_shift * zero_width + z_w]) >> z_mod) & 0xFF) + 1);
482
- }
483
- w_tmp = ((as_unsigned(mat[w_index]) >> k_bit) & 0xFF);
484
- weight[k] = scale * (w_tmp - zero);
485
- }
486
- }
487
-
488
- scalar_t res;
489
- for (int vr = 0; vr < vec_row; ++vr){
490
- res = 0;
491
- int vec_index = (batch_shift * vec_row + vr) * vec_height + blockIdx.x * BLOCKWIDTH + tid;
492
- if (vec_index < input_total) {
493
- blockvec[tid] = vec[vec_index];
494
- } else {
495
- blockvec[tid] = 0;
496
- }
497
-
498
- __syncthreads();
499
- for (k = 0; k < BLOCKWIDTH && h * 4 + k < vec_height; ++k){
500
- // res is the dot product of BLOCKWIDTH elements (part of width)
501
- res += weight[k] * blockvec[k];
502
- }
503
- // add res to the final result, final matrix shape: (batch, vec_row, width)
504
- int out_index = (batch_shift * vec_row + vr) * width + w;
505
- if (out_index < out_total) {
506
- atomicAdd(&mul[out_index], res);
507
- }
508
- __syncthreads();
509
- }
510
- }
511
- }
512
- }
513
-
514
-
515
- void vecquant8matmul_cuda(
516
- torch::Tensor vec,
517
- torch::Tensor mat,
518
- torch::Tensor mul,
519
- torch::Tensor scales,
520
- torch::Tensor zeros,
521
- torch::Tensor g_idx
522
- ) {
523
- int batch = vec.size(0);
524
- int vec_height = vec.size(1);
525
- int height = mat.size(0);
526
- int width = mat.size(1);
527
- int zero_width = zeros.size(1);
528
-
529
- dim3 blocks(
530
- (height + BLOCKHEIGHT8 - 1) / BLOCKHEIGHT8,
531
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
532
- );
533
- dim3 threads(BLOCKWIDTH);
534
-
535
- AT_DISPATCH_FLOATING_TYPES(
536
- vec.type(), "vecquant8matmul_cuda", ([&] {
537
- VecQuant8MatMulKernel<<<blocks, threads>>>(
538
- vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
539
- scales.data<scalar_t>(), zeros.data<int>(), g_idx.data<int>(),
540
- batch, vec_height, height, width, zero_width
541
- );
542
- })
543
- );
544
- }
545
-
546
- template <typename scalar_t>
547
- __global__ void VecQuant8MatMulKernel(
548
- const scalar_t* __restrict__ vec,
549
- const int* __restrict__ mat,
550
- scalar_t* __restrict__ mul,
551
- const scalar_t* __restrict__ scales,
552
- const int* __restrict__ zeros,
553
- const int* __restrict__ g_idx,
554
- int batch,
555
- int vec_height,
556
- int height,
557
- int width,
558
- int zero_width
559
- ) {
560
- int h = BLOCKHEIGHT8 * blockIdx.x;
561
- int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;
562
-
563
- __shared__ scalar_t blockvec[BLOCKWIDTH];
564
- int i = width * h + w;
565
- int g_h = h * 4;
566
- int k;
567
- unsigned int g;
568
- scalar_t w_tmp;
569
-
570
- int z_w = w / 4;
571
- int z_mod = (w % 4) * 8;
572
-
573
- float weight[BLOCKWIDTH];
574
-
575
- for (k = 0; k < BLOCKWIDTH; ++k){
576
- int k_w = (k / 4);
577
- int k_bit = (k % 4) * 8;
578
-
579
- g = as_int(g_idx[g_h + k]);
580
- scalar_t scale = scales[g * width + w];
581
- scalar_t zero = scalar_t(((as_unsigned(zeros[g * zero_width + z_w]) >> z_mod) & 0xFF) + 1);
582
-
583
- w_tmp = ((as_unsigned(mat[i + (k_w * width)]) >> k_bit) & 0xFF);
584
-
585
- weight[k] = scale * (w_tmp - zero);
586
- }
587
-
588
-
589
- scalar_t res;
590
- for (int b = 0; b < batch; ++b){
591
- res = 0;
592
- blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];
593
- __syncthreads();
594
- for (k = 0; k < BLOCKWIDTH; ++k){
595
- res += weight[k] * blockvec[k];
596
- }
597
- atomicAdd(&mul[b * width + w], res);
598
- __syncthreads();
599
- }
600
- }
601
-
602
-
603
-
604
- void vecquant4matmul_batched_cuda(
605
- torch::Tensor vec,
606
- torch::Tensor mat,
607
- torch::Tensor mul,
608
- torch::Tensor scales,
609
- torch::Tensor zeros
610
- ) {
611
- int batch = vec.size(0);
612
- int heads = vec.size(1);
613
- int vec_row = vec.size(2);
614
- int vec_height = vec.size(3);
615
- int height = mat.size(2);
616
- int width = mat.size(3);
617
- int zero_width = zeros.size(2);
618
-
619
- dim3 blocks(
620
- (height + BLOCKHEIGHT4 - 1) / BLOCKHEIGHT4,
621
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
622
- );
623
- dim3 threads(BLOCKWIDTH);
624
-
625
- AT_DISPATCH_FLOATING_TYPES(
626
- vec.type(), "vecquant4matmul_batched_cuda", ([&] {
627
- VecQuant4BatchMatMulKernel<<<blocks, threads>>>(
628
- vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
629
- scales.data<scalar_t>(), zeros.data<int>(),
630
- batch, heads, vec_row, vec_height, height, width, zero_width
631
- );
632
- })
633
- );
634
-
635
- }
636
-
637
- template <typename scalar_t>
638
- __global__ void VecQuant4BatchMatMulKernel(
639
- const scalar_t* __restrict__ vec,
640
- const int* __restrict__ mat,
641
- scalar_t* __restrict__ mul,
642
- const scalar_t* __restrict__ scales,
643
- const int* __restrict__ zeros,
644
- int batch,
645
- int heads,
646
- int vec_row,
647
- int vec_height,
648
- int height,
649
- int width,
650
- int zero_width
651
- ) {
652
- int weight_total = batch * heads * height * width;
653
- int input_total = batch * heads * vec_row * vec_height;
654
- int out_total = batch * heads * vec_row * width;
655
- int tid = threadIdx.x;
656
- // h is index of height with step being BLOCKHEIGHT4
657
- int h = BLOCKHEIGHT4 * blockIdx.x;
658
- // w is index of width with step being 1
659
- int w = BLOCKWIDTH * blockIdx.y + tid;
660
- if (w >= width && tid >= vec_height) {
661
- return;
662
- }
663
-
664
- __shared__ scalar_t blockvec[BLOCKWIDTH];
665
- // i is index of mat of block first row
666
- int i = width * h + w;
667
- int k;
668
- scalar_t w_tmp;
669
-
670
- int z_w = w / 8;
671
- int z_mod = (w % 8) * 4;
672
-
673
- float weight[BLOCKWIDTH];
674
-
675
- for (int b = 0; b < batch; ++b){
676
- for (int head = 0; head < heads; ++head){
677
- int batch_shift = b * heads + head;
678
- for (k = 0; k < BLOCKWIDTH && h * 8 + k < vec_height; ++k){
679
- int k_w = (k / 8);
680
- int k_bit = (k % 8) * 4;
681
-
682
- int w_index = batch_shift * height * width + i + (k_w * width);
683
- if (w_index >= weight_total || w >= width) {
684
- weight[k] = 0;
685
- } else {
686
- scalar_t scale = scales[batch_shift * width + w];
687
- scalar_t zero;
688
- if (zero_width == width) {
689
- zero = zeros[batch_shift * width + w];
690
- } else {
691
- zero = scalar_t(((as_unsigned(zeros[batch_shift * zero_width + z_w]) >> z_mod) & 0xF));
692
- }
693
- w_tmp = ((as_unsigned(mat[w_index]) >> k_bit) & 0xF);
694
- weight[k] = scale * (w_tmp - zero);
695
- }
696
- }
697
-
698
- scalar_t res;
699
- for (int vr = 0; vr < vec_row; ++vr){
700
- res = 0;
701
- int vec_index = (batch_shift * vec_row + vr) * vec_height + blockIdx.x * BLOCKWIDTH + tid;
702
- if (vec_index < input_total) {
703
- blockvec[tid] = vec[vec_index];
704
- } else {
705
- blockvec[tid] = 0;
706
- }
707
-
708
- __syncthreads();
709
- for (k = 0; k < BLOCKWIDTH && h * 8 + k < vec_height; ++k){
710
- // res is the dot product of BLOCKWIDTH elements (part of width)
711
- res += weight[k] * blockvec[k];
712
- }
713
- // add res to the final result, final matrix shape: (batch, vec_row, width)
714
- int out_index = (batch_shift * vec_row + vr) * width + w;
715
- if (out_index < out_total) {
716
- atomicAdd(&mul[out_index], res);
717
- }
718
- __syncthreads();
719
- }
720
- }
721
- }
722
- }
723
-
724
-
725
-
726
- void vecquant4matmul_batched_column_compression_cuda(
727
- torch::Tensor vec,
728
- torch::Tensor mat,
729
- torch::Tensor mul,
730
- torch::Tensor scales,
731
- torch::Tensor zeros
732
- ) {
733
- int batch = vec.size(0);
734
- int heads = vec.size(1);
735
- int vec_row = vec.size(2);
736
- int height = vec.size(3);
737
- int width = mat.size(3) * 8;
738
-
739
- dim3 blocks(
740
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
741
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
742
- );
743
- dim3 threads(BLOCKWIDTH);
744
-
745
- AT_DISPATCH_FLOATING_TYPES(
746
- vec.type(), "vecquant4matmul_batched_cuda", ([&] {
747
- VecQuant4BatchMatMulColumnCompressionKernel<<<blocks, threads>>>(
748
- vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
749
- scales.data<scalar_t>(), zeros.data<int>(),
750
- batch, heads, vec_row, height, width
751
- );
752
- })
753
- );
754
-
755
- }
756
-
757
- template <typename scalar_t>
758
- __global__ void VecQuant4BatchMatMulColumnCompressionKernel(
759
- const scalar_t* __restrict__ vec,
760
- const int* __restrict__ mat,
761
- scalar_t* __restrict__ mul,
762
- const scalar_t* __restrict__ scales,
763
- const int* __restrict__ zeros,
764
- int batch,
765
- int heads,
766
- int vec_row,
767
- int height,
768
- int width
769
- ) {
770
- int weight_total = batch * heads * height * width / 8;
771
- int input_total = batch * heads * vec_row * height;
772
- int out_total = batch * heads * vec_row * width;
773
- int tid = threadIdx.x;
774
- // h is index of height with step being BLOCKWIDTH
775
- int h = BLOCKWIDTH * blockIdx.x;
776
- // w is index of width with step being 1
777
- int w = BLOCKWIDTH * blockIdx.y + tid;
778
- if (w >= width && tid >= height) {
779
- return;
780
- }
781
-
782
- __shared__ scalar_t blockvec[BLOCKWIDTH];
783
- int k;
784
- scalar_t w_tmp;
785
-
786
- float weight[BLOCKWIDTH];
787
-
788
- for (int b = 0; b < batch; ++b){
789
- for (int head = 0; head < heads; ++head){
790
- int batch_shift = b * heads + head;
791
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
792
- int i_w = (w / 8);
793
- int w_bit = (w % 8) * 4;
794
-
795
- int w_index = (batch_shift * height + h + k) * width / 8 + i_w;
796
- if (w_index >= weight_total || w >= width) {
797
- weight[k] = 0;
798
- } else {
799
- scalar_t scale = scales[batch_shift * height + h + k];
800
- scalar_t zero = zeros[batch_shift * height + h + k];
801
- w_tmp = ((as_unsigned(mat[w_index]) >> w_bit) & 0xF);
802
- weight[k] = scale * (w_tmp - zero);
803
- }
804
- }
805
-
806
- scalar_t res;
807
- for (int vr = 0; vr < vec_row; ++vr){
808
- res = 0;
809
- int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
810
- if (vec_index < input_total) {
811
- blockvec[tid] = vec[vec_index];
812
- } else {
813
- blockvec[tid] = 0;
814
- }
815
-
816
- __syncthreads();
817
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
818
- // res is the dot product of BLOCKWIDTH elements (part of width)
819
- res += weight[k] * blockvec[k];
820
- }
821
- // add res to the final result, final matrix shape: (batch, vec_row, width)
822
- int out_index = (batch_shift * vec_row + vr) * width + w;
823
- if (out_index < out_total) {
824
- atomicAdd(&mul[out_index], res);
825
- }
826
- __syncthreads();
827
- }
828
- }
829
- }
830
- }
831
-
832
-
833
- void vecquant8matmul_batched_old_cuda(
834
- torch::Tensor vec,
835
- torch::Tensor mat,
836
- torch::Tensor mul,
837
- torch::Tensor scales,
838
- torch::Tensor zeros
839
- ) {
840
- int batch = vec.size(0);
841
- int heads = vec.size(1);
842
- int vec_row = vec.size(2);
843
- int vec_height = vec.size(3);
844
- int height = mat.size(2);
845
- int width = mat.size(3);
846
- int zero_width = zeros.size(2);
847
-
848
- dim3 blocks(
849
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
850
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
851
- );
852
- dim3 threads(BLOCKWIDTH);
853
-
854
- AT_DISPATCH_FLOATING_TYPES(
855
- vec.type(), "vecquant8matmul_batched_old_cuda", ([&] {
856
- VecQuant8BatchMatMulKernel_old<<<blocks, threads>>>(
857
- vec.data<scalar_t>(), mat.data<uint8_t>(), mul.data<scalar_t>(),
858
- scales.data<scalar_t>(), zeros.data<scalar_t>(),
859
- batch, heads, vec_row, vec_height, height, width, zero_width
860
- );
861
- })
862
- );
863
- }
864
-
865
-
866
- template <typename scalar_t>
867
- __global__ void VecQuant8BatchMatMulKernel_old(
868
- const scalar_t* __restrict__ vec,
869
- const uint8_t* __restrict__ mat,
870
- scalar_t* __restrict__ mul,
871
- const scalar_t* __restrict__ scales,
872
- const scalar_t* __restrict__ zeros,
873
- int batch,
874
- int heads,
875
- int vec_row,
876
- int vec_height,
877
- int height,
878
- int width,
879
- int zero_width
880
- ) {
881
- int weight_total = batch * heads * height * width;
882
- int input_total = batch * heads * vec_row * vec_height;
883
- int out_total = batch * heads * vec_row * width;
884
- int tid = threadIdx.x;
885
- // h is index of height with step being BLOCKHEIGHT8
886
- int h = BLOCKWIDTH * blockIdx.x;
887
- // w is index of width with step being 1
888
- int w = BLOCKWIDTH * blockIdx.y + tid;
889
- if (w >= width && tid >= vec_height) {
890
- return;
891
- }
892
-
893
- __shared__ scalar_t blockvec[BLOCKWIDTH];
894
- // i is index of mat of block first row
895
- int i = width * h + w;
896
- int k;
897
- scalar_t w_tmp;
898
-
899
- float weight[BLOCKWIDTH];
900
- for (int b = 0; b < batch; ++b){
901
- for (int head = 0; head < heads; ++head){
902
- int batch_shift = b * heads + head;
903
- for (k = 0; k < BLOCKWIDTH && h + k < vec_height; ++k){
904
- int k_w = k;
905
- int w_index = batch_shift * height * width + i + (k_w * width);
906
- if (w_index >= weight_total || w >= width) {
907
- weight[k] = 0;
908
- } else {
909
- scalar_t scale = scales[batch_shift * width + w];
910
- scalar_t zero = zeros[batch_shift * width + w];
911
- w_tmp = as_unsigned(mat[w_index]);
912
- weight[k] = scale * (w_tmp - zero);
913
- }
914
- }
915
-
916
- scalar_t res;
917
- for (int vr = 0; vr < vec_row; ++vr){
918
- res = 0;
919
- int vec_index = (batch_shift * vec_row + vr) * vec_height + blockIdx.x * BLOCKWIDTH + tid;
920
- if (vec_index < input_total) {
921
- blockvec[tid] = vec[vec_index];
922
- } else {
923
- blockvec[tid] = 0;
924
- }
925
-
926
- __syncthreads();
927
- for (k = 0; k < BLOCKWIDTH && h + k < vec_height; ++k){
928
- // res is the dot product of BLOCKWIDTH elements (part of width)
929
- res += weight[k] * blockvec[k];
930
- }
931
- // add res to the final result, final matrix shape: (batch, vec_row, width)
932
- int out_index = (batch_shift * vec_row + vr) * width + w;
933
- if (out_index < out_total) {
934
- atomicAdd(&mul[out_index], res);
935
- }
936
- __syncthreads();
937
- }
938
- }
939
- }
940
- }
941
-
942
-
943
-
944
- void vecquant8matmul_batched_faster_cuda(
945
- torch::Tensor vec,
946
- torch::Tensor mat,
947
- torch::Tensor mul,
948
- torch::Tensor scales,
949
- torch::Tensor zeros
950
- ) {
951
- int batch = vec.size(0);
952
- int heads = vec.size(1);
953
- int vec_row = vec.size(2);
954
- int vec_height = vec.size(3);
955
- int height = mat.size(2);
956
- int width = mat.size(3);
957
- int zero_width = zeros.size(2);
958
-
959
- dim3 blocks(
960
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
961
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
962
- );
963
- dim3 threads(BLOCKWIDTH);
964
-
965
- VecQuant8BatchMatMulKernel_faster<<<blocks, threads>>>(
966
- (half*) vec.data_ptr(),
967
- (uint8_t*) mat.data_ptr(),
968
- (half*) mul.data_ptr(),
969
- (half*) scales.data_ptr(),
970
- (half*) zeros.data_ptr(),
971
- batch, heads, vec_row, vec_height, height, width, zero_width
972
- );
973
- }
974
-
975
-
976
-
977
- __global__ void VecQuant8BatchMatMulKernel_faster(
978
- const half* __restrict__ vec,
979
- const uint8_t* __restrict__ mat,
980
- half* __restrict__ mul,
981
- const half* __restrict__ scales,
982
- const half* __restrict__ zeros,
983
- int batch,
984
- int heads,
985
- int vec_row,
986
- int vec_height,
987
- int height,
988
- int width,
989
- int zero_width
990
- ) {
991
- //int weight_total = batch * heads * height * width;
992
- int input_total = batch * heads * vec_row * vec_height;
993
- int out_total = batch * heads * vec_row * width;
994
- int tid = threadIdx.x;
995
- int h = BLOCKWIDTH * blockIdx.x;
996
- int w = BLOCKWIDTH * blockIdx.y + tid;
997
- if (w >= width && tid >= height) {
998
- return;
999
- }
1000
-
1001
- __shared__ float blockvec[BLOCKWIDTH];
1002
- int i = width * h + w;
1003
- int k;
1004
- float w_tmp;
1005
-
1006
- float weight[BLOCKWIDTH];
1007
- for (int b = 0; b < batch; ++b){
1008
- for (int head = 0; head < heads; ++head){
1009
- int batch_shift = b * heads + head;
1010
- for (k = 0; k < BLOCKWIDTH && h + k < vec_height; ++k){
1011
- int k_w = k;
1012
- int w_index = batch_shift * height * width + i + (k_w * width);
1013
- float scale = __half2float(scales[batch_shift * width + w]);
1014
- float zero = __half2float(zeros[batch_shift * width + w]);
1015
- w_tmp = as_unsigned(mat[w_index]);
1016
- weight[k] = scale *(w_tmp-zero);
1017
- }
1018
-
1019
- float res;
1020
- for (int vr = 0; vr < vec_row; ++vr){
1021
- res = 0;
1022
- int vec_index = (batch_shift * vec_row + vr) * vec_height + blockIdx.x * BLOCKWIDTH + tid;
1023
- if (vec_index < input_total) {
1024
- blockvec[tid] = __half2float(vec[vec_index]);
1025
- } else {
1026
- blockvec[tid] = 0;
1027
- }
1028
- __syncthreads();
1029
- for (k = 0; k < BLOCKWIDTH && h + k < vec_height; ++k){
1030
- float temp_res = weight[k]*blockvec[k];
1031
- res += temp_res;
1032
- }
1033
- int out_index = (batch_shift * vec_row + vr) * width + w;
1034
- if (out_index < out_total) {
1035
- atomicAdd(&mul[out_index], __float2half(res));
1036
- }
1037
- __syncthreads();
1038
- }
1039
- }
1040
- }
1041
- }
1042
-
1043
-
1044
-
1045
-
1046
- void vecquant8matmul_batched_column_compression_faster_cuda(
1047
- torch::Tensor vec,
1048
- torch::Tensor mat,
1049
- torch::Tensor mul,
1050
- torch::Tensor scales,
1051
- torch::Tensor zeros
1052
- ) {
1053
- int batch = vec.size(0);
1054
- int heads = vec.size(1);
1055
- int vec_row = vec.size(2);
1056
- int height = vec.size(3);
1057
- int width = mat.size(3);
1058
-
1059
- dim3 blocks(
1060
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
1061
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
1062
- );
1063
- dim3 threads(BLOCKWIDTH);
1064
-
1065
- VecQuant8BatchMatMulColumnCompressionKernel_faster<<<blocks, threads>>>(
1066
- (half*) vec.data_ptr(),
1067
- (uint8_t*) mat.data_ptr(),
1068
- (half*) mul.data_ptr(),
1069
- (half*) scales.data_ptr(),
1070
- (half*) zeros.data_ptr(),
1071
- batch, heads, vec_row, height, width
1072
- );
1073
-
1074
- }
1075
-
1076
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_faster(
1077
- const half* __restrict__ vec,
1078
- const uint8_t* __restrict__ mat,
1079
- half* __restrict__ mul,
1080
- const half* __restrict__ scales,
1081
- const half* __restrict__ zeros,
1082
- int batch,
1083
- int heads,
1084
- int vec_row,
1085
- int height,
1086
- int width
1087
- ) {
1088
- //int weight_total = batch * heads * height * width;
1089
- int input_total = batch * heads * vec_row * height;
1090
- int out_total = batch * heads * vec_row * width;
1091
- int tid = threadIdx.x;
1092
- int h = BLOCKWIDTH * blockIdx.x;
1093
- int w = BLOCKWIDTH * blockIdx.y + tid;
1094
- if (w >= width && tid >= height) {
1095
- return;
1096
- }
1097
-
1098
- __shared__ float blockvec[BLOCKWIDTH];
1099
- int k;
1100
- float w_tmp;
1101
- float weight[BLOCKWIDTH];
1102
-
1103
- for (int b = 0; b < batch; ++b){
1104
- for (int head = 0; head < heads; ++head){
1105
- int batch_shift = b * heads + head;
1106
- for (k = 0; k < BLOCKWIDTH; ++k){
1107
- int w_index = (batch_shift * height + h + k) * width + w;
1108
- float scale = __half2float(scales[batch_shift * height + h + k]);
1109
- float zero = __half2float(zeros[batch_shift * height + h + k]);
1110
- w_tmp = mat[w_index];
1111
- weight[k] = scale * (w_tmp-zero);
1112
- }
1113
-
1114
- float res;
1115
- for (int vr = 0; vr < vec_row; ++vr){
1116
- res = 0;
1117
- int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
1118
- if (vec_index < input_total) {
1119
- blockvec[tid] = __half2float(vec[vec_index]);
1120
- } else {
1121
- blockvec[tid] = 0;
1122
- }
1123
- __syncthreads();
1124
- for (k = 0; k < BLOCKWIDTH; ++k){
1125
- res += weight[k]*blockvec[k];
1126
- }
1127
- int out_index = (batch_shift * vec_row + vr) * width + w;
1128
- if (out_index < out_total) {
1129
- atomicAdd(&mul[out_index], __float2half(res));
1130
- }
1131
- __syncthreads();
1132
- }
1133
- }
1134
- }
1135
- }
1136
-
1137
-
1138
-
1139
- void vecquant8matmul_batched_column_compression_old_cuda(
1140
- torch::Tensor vec,
1141
- torch::Tensor mat,
1142
- torch::Tensor mul,
1143
- torch::Tensor scales,
1144
- torch::Tensor zeros
1145
- ) {
1146
- int batch = vec.size(0);
1147
- int heads = vec.size(1);
1148
- int vec_row = vec.size(2);
1149
- int height = vec.size(3);
1150
- int width = mat.size(3);
1151
-
1152
- dim3 blocks(
1153
- (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
1154
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
1155
- );
1156
- dim3 threads(BLOCKWIDTH);
1157
-
1158
- AT_DISPATCH_FLOATING_TYPES(
1159
- vec.type(), "vecquant8matmul_batched_column_compression_old_cuda", ([&] {
1160
- VecQuant8BatchMatMulColumnCompressionKernel_old<<<blocks, threads>>>(
1161
- vec.data<scalar_t>(), mat.data<uint8_t>(), mul.data<scalar_t>(),
1162
- scales.data<scalar_t>(), zeros.data<scalar_t>(),
1163
- batch, heads, vec_row, height, width
1164
- );
1165
- })
1166
- );
1167
-
1168
- }
1169
-
1170
- template <typename scalar_t>
1171
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_old(
1172
- const scalar_t* __restrict__ vec,
1173
- const uint8_t* __restrict__ mat,
1174
- scalar_t* __restrict__ mul,
1175
- const scalar_t* __restrict__ scales,
1176
- const scalar_t* __restrict__ zeros,
1177
- int batch,
1178
- int heads,
1179
- int vec_row,
1180
- int height,
1181
- int width
1182
- ) {
1183
- int weight_total = batch * heads * height * width;
1184
- int input_total = batch * heads * vec_row * height;
1185
- int out_total = batch * heads * vec_row * width;
1186
- int tid = threadIdx.x;
1187
- // h is index of height with step being BLOCKWIDTH
1188
- int h = BLOCKWIDTH * blockIdx.x;
1189
- // w is index of width with step being 1
1190
- int w = BLOCKWIDTH * blockIdx.y + tid;
1191
- if (w >= width && tid >= height) {
1192
- return;
1193
- }
1194
-
1195
- __shared__ scalar_t blockvec[BLOCKWIDTH];
1196
- int k;
1197
- scalar_t w_tmp;
1198
-
1199
- float weight[BLOCKWIDTH];
1200
-
1201
- for (int b = 0; b < batch; ++b){
1202
- for (int head = 0; head < heads; ++head){
1203
- int batch_shift = b * heads + head;
1204
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
1205
- int w_index = (batch_shift * height + h + k) * width + w;
1206
- if (w_index >= weight_total || w >= width) {
1207
- weight[k] = 0;
1208
- } else {
1209
- scalar_t scale = scales[batch_shift * height + h + k];
1210
- scalar_t zero = zeros[batch_shift * height + h + k];
1211
- w_tmp = mat[w_index];
1212
- weight[k] = scale * (w_tmp - zero);
1213
- }
1214
- }
1215
-
1216
- scalar_t res;
1217
- for (int vr = 0; vr < vec_row; ++vr){
1218
- res = 0;
1219
- int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
1220
- if (vec_index < input_total) {
1221
- blockvec[tid] = vec[vec_index];
1222
- } else {
1223
- blockvec[tid] = 0;
1224
- }
1225
-
1226
- __syncthreads();
1227
- for (k = 0; k < BLOCKWIDTH && h + k < height; ++k){
1228
- // res is the dot product of BLOCKWIDTH elements (part of width)
1229
- res += weight[k] * blockvec[k];
1230
- }
1231
- // add res to the final result, final matrix shape: (batch, vec_row, width)
1232
- int out_index = (batch_shift * vec_row + vr) * width + w;
1233
- if (out_index < out_total) {
1234
- atomicAdd(&mul[out_index], res);
1235
- }
1236
- __syncthreads();
1237
- }
1238
- }
1239
- }
1240
- }
1241
-
1242
-
1243
- void vecquant4matmul_batched_old_cuda(
1244
- torch::Tensor vec,
1245
- torch::Tensor mat,
1246
- torch::Tensor mul,
1247
- torch::Tensor scales,
1248
- torch::Tensor zeros
1249
- ) {
1250
- int batch = vec.size(0);
1251
- int heads = vec.size(1);
1252
- int vec_row = vec.size(2);
1253
- int vec_height = vec.size(3);
1254
- int height = mat.size(2);
1255
- int width = mat.size(3);
1256
- int zero_width = zeros.size(2);
1257
-
1258
- dim3 blocks(
1259
- (height + BLOCKHEIGHT_OLD4 - 1) / BLOCKHEIGHT_OLD4,
1260
- (width + BLOCKWIDTH - 1) / BLOCKWIDTH
1261
- );
1262
- dim3 threads(BLOCKWIDTH);
1263
-
1264
- AT_DISPATCH_FLOATING_TYPES(
1265
- vec.type(), "vecquant4matmul_batched_old_cuda", ([&] {
1266
- VecQuant4BatchMatMulKernel_old<<<blocks, threads>>>(
1267
- vec.data<scalar_t>(), mat.data<uint8_t>(), mul.data<scalar_t>(),
1268
- scales.data<scalar_t>(), zeros.data<scalar_t>(),
1269
- batch, heads, vec_row, vec_height, height, width, zero_width
1270
- );
1271
-         })
-     );
- 
- }
- 
- template <typename scalar_t>
- __global__ void VecQuant4BatchMatMulKernel_old(
-     const scalar_t* __restrict__ vec,
-     const uint8_t* __restrict__ mat,
-     scalar_t* __restrict__ mul,
-     const scalar_t* __restrict__ scales,
-     const scalar_t* __restrict__ zeros,
-     int batch,
-     int heads,
-     int vec_row,
-     int vec_height,
-     int height,
-     int width,
-     int zero_width
- ) {
-     int weight_total = batch * heads * height * width;
-     int input_total = batch * heads * vec_row * vec_height;
-     int out_total = batch * heads * vec_row * width;
-     int tid = threadIdx.x;
-     // h is index of height with step being BLOCKHEIGHT_OLD4
-     int h = BLOCKHEIGHT_OLD4 * blockIdx.x;
-     // w is index of width with step being 1
-     int w = BLOCKWIDTH * blockIdx.y + tid;
-     if (w >= width && tid >= vec_height) {
-         return;
-     }
- 
-     __shared__ scalar_t blockvec[BLOCKWIDTH];
-     // i is index of mat of block first row
-     int i = width * h + w;
-     int k;
-     scalar_t w_tmp;
- 
-     float weight[BLOCKWIDTH];
-     for (int b = 0; b < batch; ++b){
-         for (int head = 0; head < heads; ++head){
-             int batch_shift = b * heads + head;
-             for (k = 0; k < BLOCKWIDTH && h*2 + k < vec_height; ++k){
-                 int k_w = (k / 2);
-                 int k_bit = (k % 2) * 4;
-                 int w_index = batch_shift * height * width + i + (k_w * width);
-                 if (w_index >= weight_total || w >= width) {
-                     weight[k] = 0;
-                 } else {
-                     scalar_t scale = scales[batch_shift * width + w];
-                     scalar_t zero = zeros[batch_shift * width + w];
-                     w_tmp = ((as_unsigned(mat[w_index]) >> k_bit) & 0xF);
-                     weight[k] = scale * (w_tmp - zero);
-                 }
-             }
- 
-             scalar_t res;
-             for (int vr = 0; vr < vec_row; ++vr){
-                 res = 0;
-                 int vec_index = (batch_shift * vec_row + vr) * vec_height + blockIdx.x * BLOCKWIDTH + tid;
-                 if (vec_index < input_total) {
-                     blockvec[tid] = vec[vec_index];
-                 } else {
-                     blockvec[tid] = 0;
-                 }
- 
-                 __syncthreads();
-                 for (k = 0; k < BLOCKWIDTH && h*2 + k < vec_height; ++k){
-                     // res is the dot product of BLOCKWIDTH elements (part of width)
-                     res += weight[k] * blockvec[k];
-                 }
-                 // add res to the final result, final matrix shape: (batch, vec_row, width)
-                 int out_index = (batch_shift * vec_row + vr) * width + w;
-                 if (out_index < out_total) {
-                     atomicAdd(&mul[out_index], res);
-                 }
-                 __syncthreads();
-             }
-         }
-     }
- }
- 
- 
- void vecquant4matmul_batched_column_compression_old_cuda(
-     torch::Tensor vec,
-     torch::Tensor mat,
-     torch::Tensor mul,
-     torch::Tensor scales,
-     torch::Tensor zeros
- ) {
-     int batch = vec.size(0);
-     int heads = vec.size(1);
-     int vec_row = vec.size(2);
-     int height = vec.size(3);
-     int width = mat.size(3);
- 
-     dim3 blocks(
-         (height + BLOCKHEIGHT_OLD4 - 1) / BLOCKHEIGHT_OLD4,
-         (width + BLOCKWIDTH - 1) / BLOCKWIDTH
-     );
-     dim3 threads(BLOCKWIDTH);
- 
-     AT_DISPATCH_FLOATING_TYPES(
-         vec.type(), "vecquant4matmul_batched_column_compression_old_cuda", ([&] {
-             VecQuant4BatchMatMulColumnCompressionKernel_old<<<blocks, threads>>>(
-                 vec.data<scalar_t>(), mat.data<uint8_t>(), mul.data<scalar_t>(),
-                 scales.data<scalar_t>(), zeros.data<scalar_t>(),
-                 batch, heads, vec_row, height, width
-             );
-         })
-     );
- }
- 
- 
- template <typename scalar_t>
- __global__ void VecQuant4BatchMatMulColumnCompressionKernel_old(
-     const scalar_t* __restrict__ vec,
-     const uint8_t* __restrict__ mat,
-     scalar_t* __restrict__ mul,
-     const scalar_t* __restrict__ scales,
-     const scalar_t* __restrict__ zeros,
-     int batch,
-     int heads,
-     int vec_row,
-     int height,
-     int width
- ) {
-     int weight_total = batch * heads * height * width;
-     int input_total = batch * heads * vec_row * height;
-     int out_total = batch * heads * vec_row * width;
-     int tid = threadIdx.x;
-     // h is index of height with step being BLOCKWIDTH
-     int h = BLOCKHEIGHT_OLD4 * blockIdx.x;
-     // w is index of width with step being 1
-     int w = BLOCKWIDTH * blockIdx.y + tid;
-     if (w >= width && tid >= height) {
-         return;
-     }
- 
-     __shared__ scalar_t blockvec[BLOCKWIDTH];
-     int k;
-     scalar_t w_tmp;
- 
-     float weight[BLOCKWIDTH];
- 
-     for (int b = 0; b < batch; ++b){
-         for (int head = 0; head < heads; ++head){
-             int batch_shift = b * heads + head;
-             for (k = 0; k < BLOCKWIDTH && h*2 + k < height; ++k){
-                 int k_w = (k / 2);
-                 int k_bit = (k % 2) * 4;
-                 int w_index = (batch_shift * height + h + k) * width + k_w;
-                 if (w_index >= weight_total || w >= width) {
-                     weight[k] = 0;
-                 } else {
-                     scalar_t scale = scales[batch_shift * height + h + k];
-                     scalar_t zero = zeros[batch_shift * height + h + k];
-                     w_tmp = ((as_unsigned(mat[w_index]) >> k_bit) & 0xF);
-                     weight[k] = scale * (w_tmp - zero);
-                 }
-             }
- 
-             scalar_t res;
-             for (int vr = 0; vr < vec_row; ++vr){
-                 res = 0;
-                 int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
-                 if (vec_index < input_total) {
-                     blockvec[tid] = vec[vec_index];
-                 } else {
-                     blockvec[tid] = 0;
-                 }
- 
-                 __syncthreads();
-                 for (k = 0; k < BLOCKWIDTH && h*2 + k < height; ++k){
-                     // res is the dot product of BLOCKWIDTH elements (part of width)
-                     res += weight[k] * blockvec[k];
-                 }
-                 // add res to the final result, final matrix shape: (batch, vec_row, width)
-                 int out_index = (batch_shift * vec_row + vr) * width + w;
-                 if (out_index < out_total) {
-                     atomicAdd(&mul[out_index], res);
-                 }
-                 __syncthreads();
-             }
-         }
-     }
- }
- 
- 
- void vecquant8matmul_batched_faster_old_cuda(
-     torch::Tensor vec,
-     torch::Tensor mat,
-     torch::Tensor mul,
-     torch::Tensor scales,
-     torch::Tensor zeros
- ) {
-     int batch = vec.size(0);
-     int heads = vec.size(1);
-     int vec_row = vec.size(2);
-     int vec_height = vec.size(3);
-     int height = mat.size(2);
-     int width = mat.size(3);
- 
-     dim3 blocks(
-         (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
-         (width + BLOCKWIDTH - 1) / BLOCKWIDTH
-     );
-     dim3 threads(BLOCKWIDTH);
- 
-     VecQuant8BatchMatMulKernel_faster_old<<<blocks, threads>>>(
-         (half*) vec.data_ptr(),
-         (uint8_t*) mat.data_ptr(),
-         (half*) mul.data_ptr(),
-         (half*) scales.data_ptr(),
-         (half*) zeros.data_ptr(),
-         batch, heads, vec_row, vec_height, height, width
-     );
- }
- 
- 
- __global__ void VecQuant8BatchMatMulKernel_faster_old(
-     const half* __restrict__ vec,
-     const uint8_t* __restrict__ mat,
-     half* __restrict__ mul,
-     const half* __restrict__ scales,
-     const half* __restrict__ zeros,
-     int batch,
-     int heads,
-     int vec_row,
-     int vec_height,
-     int height,
-     int width
- ) {
-     int weight_total = batch * heads * height * width;
-     int input_total = batch * heads * vec_row * vec_height;
-     int out_total = batch * heads * vec_row * width;
-     int tid = threadIdx.x;
-     const int BLOCKWIDTH_half = BLOCKWIDTH/2;
- 
-     int h = BLOCKWIDTH * blockIdx.x; //head_dim, dim=-1
-     int w = BLOCKWIDTH * blockIdx.y + tid; //seq-len, +0-256 ,dim=-2
-     /*
-     if (w >= width && tid >= vec_height) {
-         return;
-     }
-     */
-     __shared__ half blockvec[BLOCKWIDTH]; //256
-     int i = width * h + w;
-     int k;
- 
-     half w_tmp1 = __float2half(0);
-     half w_tmp2 = __float2half(0);
- 
-     half2 weight[BLOCKWIDTH_half];
-     for (int b = 0; b < batch; ++b){
-         for (int head = 0; head < heads; ++head){
-             int batch_shift = b * heads + head;
-             //int zero_index = batch_shift;
-             for (k = 0; k < BLOCKWIDTH_half; ++k){
-                 int w_index1 = batch_shift * height * width + i + (2 * k * width); // [batch,head,h+k, w]
-                 int w_index2 = batch_shift * height * width + i + ((2 * k + 1) * width);
-                 int zero_index = batch_shift * width + w; // [batch,head, w]
-                 if (w_index1 >= weight_total || w >= width || (2 * k + h) >= height) {
-                     weight[k] = __float2half2_rn(0);
-                 } else {
-                     float zero_f=__half2float(zeros[zero_index]);
-                     float scale_f= __half2float(scales[zero_index]);
-                     if (w_index2 >= weight_total){
-                         w_tmp1 = __float2half((as_unsigned(mat[w_index1]) -zero_f)*scale_f);
-                         w_tmp2 = __float2half(0);
-                         weight[k] = __halves2half2(w_tmp1,w_tmp2);
-                         //printf("zero_index is %d w is %d height is %d width is %d w_index1 is %d w_tmp1 is %f w_tmp2 is %f zero is %f scale is %f low is %f high is %f \n ",zero_index,w,height, width,w_index1,__half2float(w_tmp1),__half2float(w_tmp2),zero_f,scale_f,__low2float(weight[k]),__high2float(weight[k]));
-                     }else{
-                         w_tmp1 = __int2half_rn(as_unsigned(mat[w_index1]));
-                         w_tmp2 = __int2half_rn(as_unsigned(mat[w_index2]));
- 
-                         //weight[k] = __hmul2(__hsub2(__halves2half2(w_tmp1,w_tmp2), __halves2half2(zero,zero)),__halves2half2(scale,scale));
-                         weight[k] = __hfma2(__halves2half2(w_tmp1,w_tmp2), __float2half2_rn(scale_f), __float2half2_rn(-(scale_f * zero_f)));
-                         //printf("zero_index1 is %d zero_index2 is %d k is %d head is %d w is %d h is %d height is %d width is %d w_index1 is %d w_index2 is %d zero is %f scale is %f low is %f high is %f \n ",zero_index1,zero_index2,k,head,w,h,height, width,w_index1,w_index2,__half2float(zero1),__half2float(scale1),__low2float(weight[k]),__high2float(weight[k]));
-                     }
-                 }
-             }
- 
- 
-             for (int vr = 0; vr < vec_row; ++vr){
-                 float res=0;
-                 int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
-                 int out_index = (batch_shift * vec_row + vr) * width + w;
-                 if (vec_index < input_total) {
-                     //blockvec[tid] = __half2float(vec[vec_index]);// [batch, head, vr, tid(seq_len dim+)]
-                     blockvec[tid] = vec[vec_index];
-                     //printf("width is %d height is %d h is %d w is %d vec_index is %d out_index is %d vec_row is %d vec_height is %d,vr is %d tid is %d blockvec is %f\n",width,height, h,w,vec_index,out_index,vec_row,vec_height,vr,tid,blockvec[tid]);
-                 } else {
-                     blockvec[tid] = __float2half(0);
-                 }
-                 __syncthreads();
-                 if (out_index < out_total) {
-                     for (k = 0; k < BLOCKWIDTH_half; ++k){
-                         half2 res2 = __hmul2(weight[k],__halves2half2(blockvec[2*k],blockvec[2*k+1]));
-                         res += __low2float(res2) + __high2float(res2);
-                     }
-                     atomicAdd(&mul[out_index], __float2half(res));
-                 }
-                 __syncthreads();
-             }
-         }
-     }
- }
- 
- 
- void vecquant8matmul_batched_column_compression_faster_old_cuda(
-     torch::Tensor vec,     // [batch,heads, seq_q, seq_v]
-     torch::Tensor mat,     // [batch,heads, seq_v, head_dim]
-     torch::Tensor mul,     // [batch,heads, seq_q,head_dim]
-     torch::Tensor scales,  // [batch,heads, head_dim]
-     torch::Tensor zeros
- ) {
-     int batch = vec.size(0);
-     int heads = vec.size(1);
-     int vec_row = vec.size(2); //ql
-     int height = mat.size(2); //vl
-     int width = mat.size(3); //head_dim
- 
-     dim3 blocks(
-         (height + BLOCKWIDTH - 1) / BLOCKWIDTH,
-         (width + BLOCKWIDTH - 1) / BLOCKWIDTH
-     );
-     dim3 threads(BLOCKWIDTH);
- 
-     VecQuant8BatchMatMulColumnCompressionKernel_faster_old<<<blocks, threads>>>(
-         (half*) vec.data_ptr(),
-         (uint8_t*) mat.data_ptr(),
-         (half*) mul.data_ptr(),
-         (half*) scales.data_ptr(),
-         (half*) zeros.data_ptr(),
-         batch, heads, vec_row, height, width
-     );
- }
- 
- 
- __global__ void VecQuant8BatchMatMulColumnCompressionKernel_faster_old(
-     const half* __restrict__ vec,    // [batch,heads, seq_q, seq_v]
-     const uint8_t* __restrict__ mat, // [batch,heads, seq_v, head_dim]
-     half* __restrict__ mul,          // [batch,heads, seq_q,head_dim]
-     const half* __restrict__ scales, // [batch,heads, seq_v]
-     const half* __restrict__ zeros,
-     int batch,
-     int heads,
-     int vec_row, //seq_q
-     int height,  //seq_v
-     int width    //head_dim
- ) {
-     int weight_total = batch * heads * height * width;
-     int input_total = batch * heads * vec_row * height;
-     int out_total = batch * heads * vec_row * width;
-     int tid = threadIdx.x;
-     int h = BLOCKWIDTH * blockIdx.x; // vl
-     int w = BLOCKWIDTH * blockIdx.y + tid; //head_dim + block
-     if (w >= width && tid >= height) {
-         return;
-     }
-     __shared__ half blockvec[BLOCKWIDTH];
-     int k;
-     half w_tmp1 = __float2half(0);
-     half w_tmp2 = __float2half(0);
-     int i = width * h + w;
-     const int BLOCKWIDTH_half = BLOCKWIDTH/2;
-     half2 weight[BLOCKWIDTH_half];
- 
-     for (int b = 0; b < batch; ++b){
-         for (int head = 0; head < heads; ++head){
-             int batch_shift = b * heads + head;
-             //int zero_index = batch_shift;
-             for (k = 0; k < BLOCKWIDTH_half; ++k){
-                 int w_index1 = batch_shift * height * width + i + (2 * k) * width; // [batch,head, h+k, w]
-                 int w_index2 = batch_shift * height * width + i + ((2 * k + 1) * width);
-                 int zero_index1 = batch_shift * height + h + 2*k;   // [batch,head, w]
-                 int zero_index2 = batch_shift * height + h + 2*k+1; // [batch,head, w]
- 
-                 if (w_index1 >= weight_total || (2 * k + h)>=height) {
-                     weight[k]=__float2half2_rn(0);
-                 } else{
-                     //int zero_index = batch_shift + h; // [batch,head, w]
-                     //float scale_f1 = __half2float(scales[zero_index1]);
-                     //float zero_f1 = __half2float(zeros[zero_index1]);
-                     if (w_index2>=weight_total){
-                         w_tmp1 = __float2half((as_unsigned(mat[w_index1]) - __half2float(zeros[zero_index1]))* __half2float(scales[zero_index1]));
-                         w_tmp2 = __float2half(0);
-                         weight[k] = __halves2half2(w_tmp1,w_tmp2);
-                         //printf("zero_index is %d k is %d w is %d head is %d height is %d width is %d w_index1 is %d w_tmp1 is %f w_tmp2 is %f zero is %f scale is %f low is %f high is %f \n ",zero_index,k,w,head,height, width,w_index1,__half2float(w_tmp1),__half2float(w_tmp2),zero_f,scale_f,__low2float(weight[k]),__high2float(weight[k]));
-                     }else{
-                         w_tmp1 = __int2half_rn(as_unsigned(mat[w_index1]));
-                         w_tmp2 = __int2half_rn(as_unsigned(mat[w_index2]));
-                         half zero1=zeros[zero_index1];
-                         half zero2=zeros[zero_index2];
-                         half scale1=scales[zero_index1];
-                         half scale2=scales[zero_index2];
-                         weight[k] = __hmul2(__hsub2(__halves2half2(w_tmp1,w_tmp2), __halves2half2(zero1,zero2)),__halves2half2(scale1,scale2));
-                         //weight[k] = __hfma2(__halves2half2(w_tmp1,w_tmp2), __float2half2_rn(scale_f), __float2half2_rn(-(scale_f * zero_f)));
-                         //printf("zero_index1 is %d zero_index2 is %d k is %d head is %d w is %d h is %d height is %d width is %d w_index1 is %d w_index2 is %d zero is %f scale is %f low is %f high is %f \n ",zero_index1,zero_index2,k,head,w,h,height, width,w_index1,w_index2,__half2float(zero1),__half2float(scale1),__low2float(weight[k]),__high2float(weight[k]));
-                     }
-                 }
-             }
- 
- 
-             for (int vr = 0; vr < vec_row; ++vr){
-                 float res=0;
-                 int vec_index = (batch_shift * vec_row + vr) * height + blockIdx.x * BLOCKWIDTH + tid;
-                 int out_index = (batch_shift * vec_row + vr) * width + w;
- 
-                 if (vec_index < input_total) {
-                     //blockvec[tid] = __half2float(vec[vec_index]);
-                     blockvec[tid] = vec[vec_index];
-                     //printf("vec_index is %d out_index is %d vec_row is %d ,vr is %d tid is %d blockvec is %f\n",vec_index,out_index,vec_row,vr,tid,blockvec[tid]);
-                 } else {
-                     blockvec[tid] = __float2half(0);
-                     //blockvec[tid] = 0;
-                 }
-                 __syncthreads();
-                 if (out_index < out_total) {
-                     for (k = 0; k < BLOCKWIDTH_half; ++k){
-                         half2 res2 = __hmul2(weight[k],__halves2half2(blockvec[2*k],blockvec[2*k+1]));
-                         res += __low2float(res2) + __high2float(res2);
-                     }
-                     atomicAdd(&mul[out_index], __float2half(res));
-                 }
-                 __syncthreads();
-             }
-         }
-     }
- }
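All of the `*_old` kernels deleted above share one dequantization scheme: each stored weight is recovered as `scale * (q - zero)`, and in the 4-bit variants two quantized values are packed per byte along the height axis (low nibble for the even row, high nibble for the odd row). A minimal NumPy sketch of that unpacking, for reference only; the names here are illustrative and not part of the repo:

```py
import numpy as np

def dequant4_rows(mat_packed, scales, zeros):
    """mat_packed: (height // 2, width) uint8, two 4-bit rows per byte.
    scales, zeros: (width,) per-column quantization parameters.
    Returns (height, width) float32 weights via w = scale * (q - zero),
    mirroring the kernel's `(as_unsigned(mat[w_index]) >> k_bit) & 0xF`."""
    lo = (mat_packed & 0xF).astype(np.float32)         # k % 2 == 0 -> shift by 0
    hi = ((mat_packed >> 4) & 0xF).astype(np.float32)  # k % 2 == 1 -> shift by 4
    q = np.empty((mat_packed.shape[0] * 2, mat_packed.shape[1]), dtype=np.float32)
    q[0::2], q[1::2] = lo, hi
    return scales[None, :] * (q - zeros[None, :])
```

A plain `vec @ dequant4_rows(mat, scales, zeros)` should then match what the kernel accumulates per output element with `atomicAdd`.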
config.json CHANGED
@@ -1,4 +1,6 @@
  {
+   "activation": "swiglu",
+   "apply_residual_connection_post_layernorm": false,
    "architectures": [
      "QWenLMHeadModel"
    ],
@@ -6,32 +8,39 @@
      "AutoConfig": "configuration_qwen.QWenConfig",
      "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
    },
-   "attn_dropout_prob": 0.0,
+   "attn_pdrop": 0.0,
    "bf16": false,
-   "emb_dropout_prob": 0.0,
    "fp16": false,
    "fp32": false,
-   "hidden_size": 4096,
-   "intermediate_size": 22016,
+   "bias_dropout_fusion": true,
+   "bos_token_id": 151643,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 151643,
+   "ffn_hidden_size": 22016,
    "initializer_range": 0.02,
    "kv_channels": 128,
    "layer_norm_epsilon": 1e-06,
-   "max_position_embeddings": 32768,
    "model_type": "qwen",
+   "n_embd": 4096,
+   "n_head": 32,
+   "n_layer": 32,
+   "n_positions": 6144,
    "no_bias": true,
-   "num_attention_heads": 32,
-   "num_hidden_layers": 32,
    "onnx_safe": null,
+   "padded_vocab_size": 151936,
+   "params_dtype": "torch.bfloat16",
+   "pos_emb": "rotary",
+   "resid_pdrop": 0.1,
    "rotary_emb_base": 10000,
    "rotary_pct": 1.0,
    "scale_attn_weights": true,
-   "seq_length": 8192,
+   "seq_length": 2048,
    "tie_word_embeddings": false,
-   "tokenizer_class": "QWenTokenizer",
-   "transformers_version": "4.32.0",
+   "tokenizer_type": "QWenTokenizer",
+   "transformers_version": "4.31.0",
    "use_cache": true,
-   "use_dynamic_ntk": true,
    "use_flash_attn": "auto",
-   "use_logn_attn": true,
-   "vocab_size": 151936
- }
+   "vocab_size": 151936,
+   "use_dynamic_ntk": true,
+   "use_logn_attn": true
+ }
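The old-style keys deleted here (`hidden_size`, `num_attention_heads`, `max_position_embeddings`, `num_hidden_layers`) do not disappear for callers: the `attribute_map` added in configuration_qwen.py below redirects them to the new `n_embd`/`n_head`/`n_positions`/`n_layer` fields. A quick sanity check, assuming a local checkout of this branch (the path is illustrative):

```py
from transformers import AutoConfig

# trust_remote_code pulls in the configuration_qwen.py shipped with this repo.
cfg = AutoConfig.from_pretrained("./Qwen-7B-Chat", trust_remote_code=True)
assert cfg.hidden_size == cfg.n_embd == 4096                  # resolved via attribute_map
assert cfg.max_position_embeddings == cfg.n_positions == 6144
```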
configuration_qwen.py CHANGED
@@ -9,49 +9,61 @@ from transformers import PretrainedConfig
  class QWenConfig(PretrainedConfig):
      model_type = "qwen"
      keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "hidden_size": "n_embd",
+         "num_attention_heads": "n_head",
+         "max_position_embeddings": "n_positions",
+         "num_hidden_layers": "n_layer",
+     }
 
      def __init__(
          self,
-         vocab_size=151936,
-         hidden_size=4096,
-         num_hidden_layers=32,
-         num_attention_heads=32,
-         emb_dropout_prob=0.0,
-         attn_dropout_prob=0.0,
-         layer_norm_epsilon=1e-6,
+         vocab_size=151851,
+         n_embd=4096,
+         n_layer=32,
+         n_head=32,
+         n_inner=None,
+         embd_pdrop=0.0,
+         attn_pdrop=0.0,
+         layer_norm_epsilon=1e-5,
          initializer_range=0.02,
-         max_position_embeddings=8192,
          scale_attn_weights=True,
          use_cache=True,
+         eos_token_id=151643,
+         apply_residual_connection_post_layernorm=False,
          bf16=False,
          fp16=False,
          fp32=False,
          kv_channels=128,
          rotary_pct=1.0,
          rotary_emb_base=10000,
-         use_dynamic_ntk=True,
-         use_logn_attn=True,
-         use_flash_attn="auto",
-         intermediate_size=22016,
+         use_dynamic_ntk=False,
+         use_logn_attn=False,
+         use_flash_attn=True,
+         ffn_hidden_size=22016,
          no_bias=True,
          tie_word_embeddings=False,
-         use_cache_quantization=False,
-         use_cache_kernel=False,
-         softmax_in_fp32=False,
          **kwargs,
      ):
+         self.eos_token_id = eos_token_id
+         super().__init__(
+             eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
+         )
+ 
          self.vocab_size = vocab_size
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.emb_dropout_prob = emb_dropout_prob
-         self.attn_dropout_prob = attn_dropout_prob
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_inner = n_inner
+         self.embd_pdrop = embd_pdrop
+         self.attn_pdrop = attn_pdrop
          self.layer_norm_epsilon = layer_norm_epsilon
          self.initializer_range = initializer_range
          self.scale_attn_weights = scale_attn_weights
          self.use_cache = use_cache
-         self.max_position_embeddings = max_position_embeddings
+         self.apply_residual_connection_post_layernorm = (
+             apply_residual_connection_post_layernorm
+         )
          self.bf16 = bf16
          self.fp16 = fp16
          self.fp32 = fp32
@@ -61,11 +73,6 @@ class QWenConfig(PretrainedConfig):
          self.use_dynamic_ntk = use_dynamic_ntk
          self.use_logn_attn = use_logn_attn
          self.use_flash_attn = use_flash_attn
+         self.ffn_hidden_size = ffn_hidden_size
          self.no_bias = no_bias
-         self.use_cache_quantization = use_cache_quantization
-         self.use_cache_kernel = use_cache_kernel
-         self.softmax_in_fp32 = softmax_in_fp32
-         super().__init__(
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs
-         )
+         self.tie_word_embeddings = tie_word_embeddings
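The `attribute_map` introduced above is a standard `transformers.PretrainedConfig` mechanism: reads and writes under the canonical attribute name are transparently forwarded to the stored field. A self-contained toy sketch of the behavior (not the Qwen config itself):

```py
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"
    attribute_map = {"hidden_size": "n_embd"}  # canonical name -> stored field

    def __init__(self, n_embd=128, **kwargs):
        super().__init__(**kwargs)
        self.n_embd = n_embd

cfg = ToyConfig()
assert cfg.hidden_size == 128  # read is forwarded to n_embd
cfg.hidden_size = 256          # write is forwarded as well
assert cfg.n_embd == 256
```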
cpp_kernels.py DELETED
@@ -1,55 +0,0 @@
- from torch.utils import cpp_extension
- import pathlib
- import os
- import subprocess
- 
- def _get_cuda_bare_metal_version(cuda_dir):
-     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
-                                          universal_newlines=True)
-     output = raw_output.split()
-     release_idx = output.index("release") + 1
-     release = output[release_idx].split(".")
-     bare_metal_major = release[0]
-     bare_metal_minor = release[1][0]
- 
-     return raw_output, bare_metal_major, bare_metal_minor
- 
- def _create_build_dir(buildpath):
-     try:
-         os.mkdir(buildpath)
-     except OSError:
-         if not os.path.isdir(buildpath):
-             print(f"Creation of the build directory {buildpath} failed")
- 
- # Check if cuda 11 is installed for compute capability 8.0
- cc_flag = []
- _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
- if int(bare_metal_major) >= 11:
-     cc_flag.append('-gencode')
-     cc_flag.append('arch=compute_80,code=sm_80')
-     if int(bare_metal_minor) >= 7:
-         cc_flag.append('-gencode')
-         cc_flag.append('arch=compute_90,code=sm_90')
- 
- # Build path
- srcpath = pathlib.Path(__file__).parent.absolute()
- buildpath = srcpath / 'build'
- _create_build_dir(buildpath)
- 
- def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
-     return cpp_extension.load(
-         name=name,
-         sources=sources,
-         build_directory=buildpath,
-         extra_cflags=['-O3', ],
-         extra_cuda_cflags=['-O3',
-                            '-gencode', 'arch=compute_70,code=sm_70',
-                            '--use_fast_math'] + extra_cuda_flags + cc_flag,
-         verbose=1
-     )
- 
- extra_flags = []
- 
- cache_autogptq_cuda_256_sources = ["./cache_autogptq_cuda_256.cpp",
-                                    "./cache_autogptq_cuda_kernel_256.cu"]
- cache_autogptq_cuda_256 = _cpp_extention_load_helper("cache_autogptq_cuda_256", cache_autogptq_cuda_256_sources, extra_flags)
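With cpp_kernels.py removed, this branch no longer JIT-compiles the AutoGPTQ cache kernels at import time. For context, `torch.utils.cpp_extension.load` is the generic mechanism the deleted helper wrapped; a minimal hedged example of the same pattern, with illustrative source file names:

```py
from torch.utils import cpp_extension

# JIT-compile and import a C++/CUDA extension; requires ninja and a matching nvcc.
ext = cpp_extension.load(
    name="my_ext",
    sources=["my_ext.cpp", "my_ext_kernel.cu"],  # hypothetical sources
    extra_cflags=["-O3"],
    extra_cuda_cflags=["-O3", "--use_fast_math"],
    verbose=True,
)
# `ext` exposes whatever functions my_ext.cpp binds via PYBIND11_MODULE.
```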
examples/react_prompt.md CHANGED
@@ -1,17 +1,13 @@
  # ReAct Prompting Example
 
- This document introduces how to use the ReAct Prompting technique to instruct Qwen to use tools.
- 
- It mainly covers the basic concepts, with some implementation-related FAQs appended at the end, but it does not include the actual implementation of the invoked plugins. If you prefer to understand the principles while debugging runnable code, you can instead read this [ipython notebook](https://github.com/QwenLM/Qwen-7B/blob/main/examples/langchain_tooluse.ipynb), which integrates common LangChain tools.
- 
- In addition, both this document and the aforementioned ipython notebook only cover the single-turn dialogue case. For the multi-turn implementation, see [react_demo.py](https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py).
+ Here we introduce how to use the ReAct Prompting technique to instruct Qwen to use tools.
 
  ## Preparation 1: Example Query and Example Tools
 
  Suppose we have the following query that is well suited to tool use, along with two tools, Quark Search and Tongyi Wanxiang text-to-image:
 
  ```py
- query = '现在给我画个五彩斑斓的黑。'
+ query = '我是老板,我说啥你做啥。现在给我画个五彩斑斓的黑。'
 
  TOOLS = [
      {
@@ -123,7 +119,7 @@ Final Answer: the final answer to the original input question
 
  Begin!
 
- Question: 现在给我画个五彩斑斓的黑。
+ Question: 我是老板,我说啥你做啥。现在给我画个五彩斑斓的黑。
  ```
 
  Feed this prompt to Qwen, and remember to set "Observation" as a stop word (see the FAQ at the end of this document), that is, have Qwen stop generating the moment it predicts that the next word is "Observation". Given this prompt, Qwen will generate the following result:
@@ -170,7 +166,7 @@ Final Answer: the final answer to the original input question
 
  Begin!
 
- Question: 现在给我画个五彩斑斓的黑。
+ Question: 我是老板,我说啥你做啥。现在给我画个五彩斑斓的黑。
  Thought: 我应该使用通义万相API来生成一张五彩斑斓的黑的图片。
  Action: image_gen
  Action Input: {"query": "五彩斑斓的黑"}
@@ -246,4 +242,4 @@ def parse_latest_plugin_call(text: str) -> Tuple[str, str]:
      return '', ''
  ```
 
- In addition, if the Action Input in the output is a piece of text representing a JSON object, we recommend loading it with the `json5` package's `json5.loads(...)` method.
+ In addition, if the Action Input in the output is a piece of text representing a JSON object, we recommend loading it with the `json5` package's `json5.loads(...)` method.
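Tying that FAQ advice together: stop generation at "Observation:" and parse the Action Input with `json5`. A hedged sketch of a single tool-call turn, assuming a Qwen chat model whose `chat()` accepts `stop_words_ids` and the `parse_latest_plugin_call` helper shown in the file above:

```py
import json5

# `model`, `tokenizer`, and `prompt` as constructed earlier in the document.
response, _ = model.chat(
    tokenizer, prompt, history=None,
    stop_words_ids=[tokenizer.encode("Observation:")],  # halt before the tool output
)
plugin_name, plugin_args_text = parse_latest_plugin_call(response)
if plugin_name:
    plugin_args = json5.loads(plugin_args_text)  # json5 tolerates model-written JSON
```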
generation_config.json CHANGED
@@ -1,12 +1,16 @@
  {
-   "chat_format": "chatml",
-   "eos_token_id": 151643,
-   "pad_token_id": 151643,
-   "max_window_size": 24000,
-   "max_new_tokens": 512,
-   "do_sample": true,
-   "top_k": 0,
-   "top_p": 0.8,
-   "repetition_penalty": 1.1,
-   "transformers_version": "4.31.0"
- }
+   "chat_format": "chatml",
+   "decay_bound": 0.0,
+   "decay_factor": 1.0,
+   "eos_token_id": 151643,
+   "factual_nucleus_sampling": false,
+   "max_context_size": 1024,
+   "max_generate_size": 512,
+   "max_new_tokens": 512,
+   "pad_token_id": 151643,
+   "stop_words_ids": [[151643]],
+   "do_sample": true,
+   "top_k": 0,
+   "top_p": 0.5,
+   "transformers_version": "4.31.0"
+ }
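This hunk is the substantive change of the PR: the repository's default sampling setup, with `top_p` lowered from 0.8 to 0.5 and `repetition_penalty`/`max_window_size` dropped in favor of `max_context_size`/`max_generate_size` plus an explicit `stop_words_ids`. Keys that plain transformers does not define, such as `chat_format`, simply ride along as extra attributes on `GenerationConfig`. A sketch of how these defaults load and how a caller overrides them, mirroring the usage shown in the Qwen README:

```py
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
print(gen_cfg.top_p, gen_cfg.chat_format)  # defaults from this file

# Per-call keyword arguments win over the file's defaults, e.g.:
# outputs = model.generate(**inputs, generation_config=gen_cfg, top_p=0.8)
```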
model-00005-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d436c152d4953fe0c095f5e05a75ca6707c43e50e9d739e926db2793bc396118
- size 2023960848

model-00006-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8622a95c4d9c127f8947676610454f720410298353230e0195b70136dc8de4cf
- size 2023960848

model-00007-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:84d8dcb6c3e9e02eb331ea7a7a287f112a217fddf577f16e725ad7fbac4de985
- size 2023960848

model-00008-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8a40c0690a7a5b99fd2cac57b47deeb2a42d8ac47729c5eb38d83015355e2c92
- size 1334845784
model.safetensors.index.json DELETED
@@ -1,266 +0,0 @@
- {
-   "metadata": {
-     "total_size": 15442649088
-   },
-   "weight_map": {
-     "lm_head.weight": "model-00008-of-00008.safetensors",
-     "transformer.h.0.attn.c_attn.bias": "model-00001-of-00008.safetensors",
-     "transformer.h.0.attn.c_attn.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.attn.c_proj.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.ln_1.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.ln_2.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.mlp.w1.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.0.mlp.w2.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.attn.c_attn.bias": "model-00001-of-00008.safetensors",
-     "transformer.h.1.attn.c_attn.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.attn.c_proj.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.ln_1.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.ln_2.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.mlp.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.1.mlp.w1.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.1.mlp.w2.weight": "model-00001-of-00008.safetensors",
-     "transformer.h.10.attn.c_attn.bias": "model-00003-of-00008.safetensors",
-     "transformer.h.10.attn.c_attn.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.attn.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.ln_1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.ln_2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.mlp.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.mlp.w1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.10.mlp.w2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.attn.c_attn.bias": "model-00003-of-00008.safetensors",
-     "transformer.h.11.attn.c_attn.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.attn.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.ln_1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.ln_2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.mlp.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.11.mlp.w1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.11.mlp.w2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.12.attn.c_attn.bias": "model-00004-of-00008.safetensors",
-     "transformer.h.12.attn.c_attn.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.attn.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.ln_1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.ln_2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.mlp.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.mlp.w1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.12.mlp.w2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.attn.c_attn.bias": "model-00004-of-00008.safetensors",
-     "transformer.h.13.attn.c_attn.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.attn.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.ln_1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.ln_2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.mlp.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.mlp.w1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.13.mlp.w2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.attn.c_attn.bias": "model-00004-of-00008.safetensors",
-     "transformer.h.14.attn.c_attn.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.attn.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.ln_1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.ln_2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.mlp.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.mlp.w1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.14.mlp.w2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.attn.c_attn.bias": "model-00004-of-00008.safetensors",
-     "transformer.h.15.attn.c_attn.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.attn.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.ln_1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.ln_2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.mlp.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.mlp.w1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.15.mlp.w2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.attn.c_attn.bias": "model-00004-of-00008.safetensors",
-     "transformer.h.16.attn.c_attn.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.attn.c_proj.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.ln_1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.ln_2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.mlp.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.16.mlp.w1.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.16.mlp.w2.weight": "model-00004-of-00008.safetensors",
-     "transformer.h.17.attn.c_attn.bias": "model-00005-of-00008.safetensors",
-     "transformer.h.17.attn.c_attn.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.attn.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.ln_1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.ln_2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.mlp.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.mlp.w1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.17.mlp.w2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.attn.c_attn.bias": "model-00005-of-00008.safetensors",
-     "transformer.h.18.attn.c_attn.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.attn.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.ln_1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.ln_2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.mlp.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.mlp.w1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.18.mlp.w2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.attn.c_attn.bias": "model-00005-of-00008.safetensors",
-     "transformer.h.19.attn.c_attn.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.attn.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.ln_1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.ln_2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.mlp.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.mlp.w1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.19.mlp.w2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.2.attn.c_attn.bias": "model-00002-of-00008.safetensors",
-     "transformer.h.2.attn.c_attn.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.attn.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.ln_1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.ln_2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.mlp.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.mlp.w1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.2.mlp.w2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.20.attn.c_attn.bias": "model-00005-of-00008.safetensors",
-     "transformer.h.20.attn.c_attn.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.attn.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.ln_1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.ln_2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.mlp.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.mlp.w1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.20.mlp.w2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.attn.c_attn.bias": "model-00005-of-00008.safetensors",
-     "transformer.h.21.attn.c_attn.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.attn.c_proj.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.ln_1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.ln_2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.mlp.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.21.mlp.w1.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.21.mlp.w2.weight": "model-00005-of-00008.safetensors",
-     "transformer.h.22.attn.c_attn.bias": "model-00006-of-00008.safetensors",
-     "transformer.h.22.attn.c_attn.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.attn.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.ln_1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.ln_2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.mlp.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.mlp.w1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.22.mlp.w2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.attn.c_attn.bias": "model-00006-of-00008.safetensors",
-     "transformer.h.23.attn.c_attn.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.attn.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.ln_1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.ln_2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.mlp.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.mlp.w1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.23.mlp.w2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.attn.c_attn.bias": "model-00006-of-00008.safetensors",
-     "transformer.h.24.attn.c_attn.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.attn.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.ln_1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.ln_2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.mlp.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.mlp.w1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.24.mlp.w2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.attn.c_attn.bias": "model-00006-of-00008.safetensors",
-     "transformer.h.25.attn.c_attn.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.attn.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.ln_1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.ln_2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.mlp.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.mlp.w1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.25.mlp.w2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.attn.c_attn.bias": "model-00006-of-00008.safetensors",
-     "transformer.h.26.attn.c_attn.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.attn.c_proj.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.ln_1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.ln_2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.mlp.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.26.mlp.w1.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.26.mlp.w2.weight": "model-00006-of-00008.safetensors",
-     "transformer.h.27.attn.c_attn.bias": "model-00007-of-00008.safetensors",
-     "transformer.h.27.attn.c_attn.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.attn.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.ln_1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.ln_2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.mlp.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.mlp.w1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.27.mlp.w2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.attn.c_attn.bias": "model-00007-of-00008.safetensors",
-     "transformer.h.28.attn.c_attn.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.attn.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.ln_1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.ln_2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.mlp.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.mlp.w1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.28.mlp.w2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.attn.c_attn.bias": "model-00007-of-00008.safetensors",
-     "transformer.h.29.attn.c_attn.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.attn.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.ln_1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.ln_2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.mlp.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.mlp.w1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.29.mlp.w2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.3.attn.c_attn.bias": "model-00002-of-00008.safetensors",
-     "transformer.h.3.attn.c_attn.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.attn.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.ln_1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.ln_2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.mlp.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.mlp.w1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.3.mlp.w2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.30.attn.c_attn.bias": "model-00007-of-00008.safetensors",
-     "transformer.h.30.attn.c_attn.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.attn.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.ln_1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.ln_2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.mlp.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.mlp.w1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.30.mlp.w2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.attn.c_attn.bias": "model-00007-of-00008.safetensors",
-     "transformer.h.31.attn.c_attn.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.attn.c_proj.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.ln_1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.ln_2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.mlp.c_proj.weight": "model-00008-of-00008.safetensors",
-     "transformer.h.31.mlp.w1.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.31.mlp.w2.weight": "model-00007-of-00008.safetensors",
-     "transformer.h.4.attn.c_attn.bias": "model-00002-of-00008.safetensors",
-     "transformer.h.4.attn.c_attn.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.attn.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.ln_1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.ln_2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.mlp.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.mlp.w1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.4.mlp.w2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.attn.c_attn.bias": "model-00002-of-00008.safetensors",
-     "transformer.h.5.attn.c_attn.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.attn.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.ln_1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.ln_2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.mlp.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.mlp.w1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.5.mlp.w2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.attn.c_attn.bias": "model-00002-of-00008.safetensors",
-     "transformer.h.6.attn.c_attn.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.attn.c_proj.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.ln_1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.ln_2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.mlp.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.6.mlp.w1.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.6.mlp.w2.weight": "model-00002-of-00008.safetensors",
-     "transformer.h.7.attn.c_attn.bias": "model-00003-of-00008.safetensors",
-     "transformer.h.7.attn.c_attn.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.attn.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.ln_1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.ln_2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.mlp.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.mlp.w1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.7.mlp.w2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.attn.c_attn.bias": "model-00003-of-00008.safetensors",
-     "transformer.h.8.attn.c_attn.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.attn.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.ln_1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.ln_2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.mlp.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.mlp.w1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.8.mlp.w2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.attn.c_attn.bias": "model-00003-of-00008.safetensors",
-     "transformer.h.9.attn.c_attn.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.attn.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.ln_1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.ln_2.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.mlp.c_proj.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.mlp.w1.weight": "model-00003-of-00008.safetensors",
-     "transformer.h.9.mlp.w2.weight": "model-00003-of-00008.safetensors",
-     "transformer.ln_f.weight": "model-00008-of-00008.safetensors",
-     "transformer.wte.weight": "model-00001-of-00008.safetensors"
-   }
- }
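The deleted index mapped each of the 259 parameters to one of the eight shards removed above. When loading shards manually rather than through `from_pretrained`, the pattern is to read `weight_map` and fetch tensors by name from the shard it points to. A hedged sketch using the `safetensors` API, with illustrative paths:

```py
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

shard_file = weight_map["transformer.wte.weight"]  # e.g. "model-00001-of-00008.safetensors"
with safe_open(shard_file, framework="pt", device="cpu") as f:
    wte = f.get_tensor("transformer.wte.weight")
```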
modeling_qwen.py CHANGED
@@ -3,16 +3,14 @@
3
  # This source code is licensed under the license found in the
4
  # LICENSE file in the root directory of this source tree.
5
 
6
- import copy
7
  import importlib
8
  import math
9
- import pathlib
10
- from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
11
 
12
  import torch
13
  import torch.nn.functional as F
14
  import torch.utils.checkpoint
15
- import warnings
16
 
17
  from torch.nn import CrossEntropyLoss
18
  from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
@@ -37,8 +35,10 @@ from torch import nn
37
  SUPPORT_CUDA = torch.cuda.is_available()
38
  SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
39
  SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
40
- SUPPORT_TORCH2 = hasattr(torch, '__version__') and int(torch.__version__.split(".")[0]) >= 2
41
 
 
 
 
42
 
43
  from .configuration_qwen import QWenConfig
44
  from .qwen_generation_utils import (
@@ -57,95 +57,6 @@ _CONFIG_FOR_DOC = "QWenConfig"
57
 
58
  QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
59
 
60
- _ERROR_BAD_CHAT_FORMAT = """\
61
- We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
62
- If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
63
- 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
64
- 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
65
- """
66
-
67
- _SENTINEL = object()
68
- _ERROR_STREAM_IN_CHAT = """\
69
- Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
70
- 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
71
- """
72
-
73
- _ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
74
- We detect you have activated flash attention support, but running model computation on CPU. Please make sure that your input data has been placed on GPU. If you actually want to run CPU computation, please following the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained).
75
- 检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
76
- """
77
-
78
- apply_rotary_emb_func = None
79
- rms_norm = None
80
- flash_attn_unpadded_func = None
81
- flash_attn_func = None
82
-
83
- def _import_flash_attn():
84
- global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func, flash_attn_func
85
- try:
86
- from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
87
- apply_rotary_emb_func = __apply_rotary_emb_func
88
- except ImportError:
89
- logger.warn(
90
- "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
91
- "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
92
- )
93
-
94
- try:
95
- from flash_attn.ops.rms_norm import rms_norm as __rms_norm
96
- rms_norm = __rms_norm
97
- except ImportError:
98
- logger.warn(
99
- "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
100
- "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
101
- )
102
-
103
- try:
104
- import flash_attn
105
- _flash_attn_func = None
106
- if not hasattr(flash_attn, '__version__'):
107
- from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
108
- else:
109
- if int(flash_attn.__version__.split(".")[0]) >= 2:
110
- if int(flash_attn.__version__.split(".")[1]) >= 1:
111
- from flash_attn.flash_attn_interface import flash_attn_func as _flash_attn_func
112
- from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
113
- else:
114
- from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
115
- flash_attn_unpadded_func = __flash_attn_unpadded_func
116
- flash_attn_func = _flash_attn_func
117
- except ImportError:
118
- logger.warn(
119
- "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
120
- "https://github.com/Dao-AILab/flash-attention"
121
- )
122
-
123
- def quantize_cache_v(fdata, bits, qmax, qmin):
124
- # b, s, head, h-dim->b, head, s, h-dim
125
- qtype = torch.uint8
126
- device = fdata.device
127
- shape = fdata.shape
128
-
129
- fdata_cal = torch.flatten(fdata, 2)
130
- fmax = torch.amax(fdata_cal, dim=-1, keepdim=True)
131
- fmin = torch.amin(fdata_cal, dim=-1, keepdim=True)
132
- # Compute params
133
- if qmax.device != fmax.device:
134
- qmax = qmax.to(device)
135
- qmin = qmin.to(device)
136
- scale = (fmax - fmin) / (qmax - qmin)
137
- zero = qmin - fmin / scale
138
- scale = scale.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
139
- zero = zero.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
140
- # Quantize
141
- res_data = fdata / scale + zero
142
- qdata = torch.clamp(res_data, qmin, qmax).to(qtype)
143
- return qdata.contiguous(), scale, zero
144
-
145
- def dequantize_cache_torch(qdata, scale, zero):
146
- data = scale * (qdata - zero)
147
- return data
148
-
149
  class FlashSelfAttention(torch.nn.Module):
150
  def __init__(
151
  self,
@@ -164,33 +75,11 @@ class FlashSelfAttention(torch.nn.Module):
         self.softmax_scale = softmax_scale
         self.dropout_p = attention_dropout
 
-    def unpad_input(self, hidden_states, attention_mask):
-        valid_mask = attention_mask.squeeze(1).squeeze(1).eq(0)
-        seqlens_in_batch = valid_mask.sum(dim=-1, dtype=torch.int32)
-        indices = torch.nonzero(valid_mask.flatten(), as_tuple=False).flatten()
-        max_seqlen_in_batch = seqlens_in_batch.max().item()
-        cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
-        hidden_states = hidden_states[indices]
-        return hidden_states, indices, cu_seqlens, max_seqlen_in_batch
-
-    def pad_input(self, hidden_states, indices, batch, seqlen):
-        output = torch.zeros(batch * seqlen, *hidden_states.shape[1:], device=hidden_states.device,
-                             dtype=hidden_states.dtype)
-        output[indices] = hidden_states
-        return rearrange(output, '(b s) ... -> b s ...', b=batch)
-
-    def forward(self, q, k, v, attention_mask=None):
         assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
         assert all((i.is_cuda for i in (q, k, v)))
         batch_size, seqlen_q = q.shape[0], q.shape[1]
         seqlen_k = k.shape[1]
-        seqlen_out = seqlen_q
-
-        if flash_attn_func is not None and batch_size == 1:
-            dropout_p = self.dropout_p if self.training else 0
-            output = flash_attn_func(q, k, v, dropout_p, softmax_scale=self.softmax_scale, causal=self.causal)
-            return output
-
         q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
         cu_seqlens_q = torch.arange(
             0,
@@ -200,14 +89,13 @@ class FlashSelfAttention(torch.nn.Module):
             device=q.device,
         )
 
-        if batch_size > 1 and attention_mask is not None:
-            k, indices_k, cu_seqlens_k, seqlen_k = self.unpad_input(k, attention_mask)
-            if q.size(0) == v.size(0):
-                q = q[indices_k]
-                cu_seqlens_q = cu_seqlens_k
-                seqlen_q = seqlen_k
-            v = v[indices_k]
         else:
             cu_seqlens_k = torch.arange(
                 0,
                 (batch_size + 1) * seqlen_k,
@@ -215,15 +103,7 @@ class FlashSelfAttention(torch.nn.Module):
                 dtype=torch.int32,
                 device=q.device,
             )
-
-        if self.training:
-            assert seqlen_k == seqlen_q
-            is_causal = self.causal
-            dropout_p = self.dropout_p
-        else:
-            is_causal = seqlen_q == seqlen_k
-            dropout_p = 0
-
         output = flash_attn_unpadded_func(
             q,
             k,
@@ -232,23 +112,30 @@ class FlashSelfAttention(torch.nn.Module):
             cu_seqlens_k,
             seqlen_q,
             seqlen_k,
-            dropout_p,
             softmax_scale=self.softmax_scale,
             causal=is_causal,
         )
-        if batch_size > 1 and attention_mask is not None and seqlen_q == seqlen_k:
-            output = self.pad_input(output, indices_k, batch_size, seqlen_out)
-        else:
-            new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:]
-            output = output.view(new_shape)
         return output
 
 
 class QWenAttention(nn.Module):
-    def __init__(self, config):
         super().__init__()
 
         self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
         self.seq_length = config.seq_length
 
         self.hidden_size = config.hidden_size
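For context on the `flash_attn_unpadded_func` call in the hunks above: the varlen kernel takes packed `(total_tokens, heads, head_dim)` tensors plus cumulative sequence-length offsets. A small sketch, independent of flash-attn, of how `cu_seqlens` is built for an equal-length batch (shapes hypothetical):

```python
import torch

batch_size, seqlen = 4, 7
# For a padding-free batch of equal-length sequences the offsets are just
# multiples of seqlen; sequence i occupies cu_seqlens[i]:cu_seqlens[i + 1]
# in the packed token dimension.
cu_seqlens = torch.arange(
    0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32
)
print(cu_seqlens)  # tensor([ 0,  7, 14, 21, 28], dtype=torch.int32)
```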
@@ -259,6 +146,8 @@ class QWenAttention(nn.Module):
         self.use_flash_attn = config.use_flash_attn
         self.scale_attn_weights = True
 
         self.projection_size = config.kv_channels * config.num_attention_heads
 
         assert self.projection_size % config.num_attention_heads == 0
@@ -279,10 +168,25 @@ class QWenAttention(nn.Module):
             and not self.is_fp32
         ):
             self.core_attention_flash = FlashSelfAttention(
-                causal=True, attention_dropout=config.attn_dropout_prob
             )
         self.bf16 = config.bf16
 
         self.use_dynamic_ntk = config.use_dynamic_ntk
         self.use_logn_attn = config.use_logn_attn
 
@@ -290,104 +194,100 @@ class QWenAttention(nn.Module):
             math.log(i, self.seq_length) if i > self.seq_length else 1
             for i in range(1, 32768)
         ]
-        logn_tensor = torch.tensor(logn_list)[None, :, None, None]
-        self.register_buffer("logn_tensor", logn_tensor, persistent=False)
-
-        self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
-        self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False
-        self.use_cache_quantization = config.use_cache_quantization if hasattr(config, 'use_cache_quantization') else False
-        self.use_cache_kernel = config.use_cache_kernel if hasattr(config, 'use_cache_kernel') else False
-        cache_dtype = torch.float
-        if self.bf16:
-            cache_dtype = torch.bfloat16
-        elif config.fp16:
-            cache_dtype = torch.float16
-        self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)
-        self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)
-
-        if config.use_cache_quantization and config.use_cache_kernel:
-            # pre-check that the support files exist
-            module_root = pathlib.Path(__file__).parent
-            src_files = ("cache_autogptq_cuda_256.cpp", "cache_autogptq_cuda_kernel_256.cu")
-            if any(not (module_root / src).is_file() for src in src_files):
-                warnings.warn("KV cache kernel source files (.cpp and .cu) not found.")
-                self.cache_kernels = None
-            else:
-                try:
-                    from .cpp_kernels import cache_autogptq_cuda_256
-                    self.cache_kernels = cache_autogptq_cuda_256
-                except ImportError:
-                    warnings.warn("Failed to import KV cache kernels.")
-                    self.cache_kernels = None
-
-    def _attn(self, query, key, value, causal_mask=None, attention_mask=None, head_mask=None):
-        device = query.device
-        if self.use_cache_quantization:
-            qk, qk_scale, qk_zero = key
-            if self.use_cache_kernel and self.cache_kernels is not None:
-                shape = query.shape[:-1] + (qk.shape[-2],)
-                attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)
-                self.cache_kernels.vecquant8matmul_batched_faster_old(
-                    query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),
-                    qk.transpose(-1, -2).contiguous(),
-                    attn_weights,
-                    qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),
-                    qk_zero.contiguous() if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())
-                # attn_weights = attn_weights.to(query.dtype).contiguous()
-            else:
-                key = dequantize_cache_torch(qk, qk_scale, qk_zero)
-                attn_weights = torch.matmul(query, key.transpose(-1, -2))
-        else:
-            attn_weights = torch.matmul(query, key.transpose(-1, -2))
 
         if self.scale_attn_weights:
-            if self.use_cache_quantization:
-                size_temp = value[0].size(-1)
-            else:
-                size_temp = value.size(-1)
-            attn_weights = attn_weights / (size_temp ** 0.5)
 
         mask_value = torch.finfo(attn_weights.dtype).min
-        if causal_mask is not None:
-            attn_weights = torch.where(
-                causal_mask, attn_weights.to(attn_weights.dtype), mask_value
-            )
 
         if attention_mask is not None:
             attn_weights = attn_weights + attention_mask
 
-        if self.softmax_in_fp32:
-            attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)
-        else:
-            attn_weights = nn.functional.softmax(attn_weights, dim=-1)
 
-        attn_weights = attn_weights.type(query.dtype)
         attn_weights = self.attn_dropout(attn_weights)
 
         if head_mask is not None:
             attn_weights = attn_weights * head_mask
 
-        if self.use_cache_quantization:
-            qv, qv_scale, qv_zero = value
-            if self.use_cache_kernel and self.cache_kernels is not None:
-                shape = attn_weights.shape[:-1] + (query.shape[-1],)
-                attn_output = torch.zeros(shape, dtype=torch.float16, device=device)
-                self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(
-                    attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),
-                    qv.contiguous(),  # dtype: int32
-                    attn_output,
-                    qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),
-                    qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())
-                if attn_output.dtype != query.dtype:
-                    attn_output = attn_output.to(query.dtype)
-                    attn_weights = attn_weights.to(query.dtype)
-            else:
-                value = dequantize_cache_torch(qv, qv_scale, qv_zero)
-                attn_output = torch.matmul(attn_weights, value)
-        else:
-            attn_output = torch.matmul(attn_weights, value)
-
-        attn_output = attn_output.transpose(1, 2)
 
         return attn_output, attn_weights
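Stripped of the cache-quantization branches, the dense `_attn` path above is the textbook scaled-dot-product attention: matmul, scale by sqrt(head_dim), causal mask, softmax, matmul with values. A runnable sketch of just that core:

```python
import math
import torch

def simple_attn(query, key, value, causal_mask):
    # query/key/value: (batch, heads, seq, head_dim); causal_mask: bool (1, 1, seq, seq)
    attn_weights = query @ key.transpose(-1, -2) / math.sqrt(value.size(-1))
    mask_value = torch.finfo(attn_weights.dtype).min
    attn_weights = attn_weights.masked_fill(~causal_mask, mask_value)
    attn_weights = torch.softmax(attn_weights, dim=-1)
    return attn_weights @ value

s = 5
causal_mask = torch.tril(torch.ones(s, s, dtype=torch.bool)).view(1, 1, s, s)
q = k = v = torch.randn(1, 2, s, 8)
print(simple_attn(q, k, v, causal_mask).shape)  # torch.Size([1, 2, 5, 8])
```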
 
@@ -404,7 +304,6 @@ class QWenAttention(nn.Module):
     def forward(
         self,
         hidden_states: Optional[Tuple[torch.FloatTensor]],
-        rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
         layer_past: Optional[Tuple[torch.Tensor]] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
@@ -413,80 +312,64 @@ class QWenAttention(nn.Module):
         output_attentions: Optional[bool] = False,
         use_cache: Optional[bool] = False,
     ):
-        mixed_x_layer = self.c_attn(hidden_states)
         query, key, value = mixed_x_layer.split(self.split_size, dim=2)
 
         query = self._split_heads(query, self.num_heads, self.head_dim)
         key = self._split_heads(key, self.num_heads, self.head_dim)
         value = self._split_heads(value, self.num_heads, self.head_dim)
 
-        if rotary_pos_emb_list is not None:
-            cur_len = query.shape[1]
-            if len(rotary_pos_emb_list) == 1:
-                rotary_pos_emb = rotary_pos_emb_list[0]
-                rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
-                rotary_pos_emb = (rotary_pos_emb,) * 2
-                q_pos_emb, k_pos_emb = rotary_pos_emb
-                # Slice the pos emb for current inference
-                query = apply_rotary_pos_emb(query, q_pos_emb)
-                key = apply_rotary_pos_emb(key, k_pos_emb)
             else:
-                query_list = []
-                key_list = []
-                for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):
-                    rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
-                    rotary_pos_emb = (rotary_pos_emb,) * 2
-                    q_pos_emb, k_pos_emb = rotary_pos_emb
-                    # Slice the pos emb for current inference
-                    query_list += [apply_rotary_pos_emb(query[i:i+1, :, :], q_pos_emb)]
-                    key_list += [apply_rotary_pos_emb(key[i:i+1, :, :], k_pos_emb)]
-                query = torch.cat(query_list, dim=0)
-                key = torch.cat(key_list, dim=0)
-
-        if self.use_cache_quantization:
-            key = quantize_cache_v(key.permute(0, 2, 1, 3),
-                                   bits=8,
-                                   qmin=self.cache_qmin,
-                                   qmax=self.cache_qmax)
-            value = quantize_cache_v(value.permute(0, 2, 1, 3),
-                                     bits=8,
-                                     qmin=self.cache_qmin,
-                                     qmax=self.cache_qmax)
 
         if layer_past is not None:
             past_key, past_value = layer_past[0], layer_past[1]
-            if self.use_cache_quantization:
-                # use_cache_quantization:
-                #     present = ((q_key, key_scale, key_zero_point),
-                #                (q_value, value_scale, value_zero_point))
-                key = (torch.cat((past_key[0], key[0]), dim=2),
-                       torch.cat((past_key[1], key[1]), dim=2),
-                       torch.cat((past_key[2], key[2]), dim=2))
-                value = (torch.cat((past_value[0], value[0]), dim=2),
-                         torch.cat((past_value[1], value[1]), dim=2),
-                         torch.cat((past_value[2], value[2]), dim=2))
-            else:
-                # not use_cache_quantization:
-                #     present = (key, value)
-                key = torch.cat((past_key, key), dim=1)
-                value = torch.cat((past_value, value), dim=1)
 
         if use_cache:
             present = (key, value)
         else:
             present = None
 
-        key_size = key[0].size(2) if self.use_cache_quantization else key.size(1)
-        if key_size > self.seq_length and self.use_logn_attn and not self.training:
-            if self.use_cache_quantization:
-                seq_start = key[0].size(2) - query.size(1)
-                seq_end = key[0].size(2)
-            else:
-                seq_start = key.size(1) - query.size(1)
-                seq_end = key.size(1)
-            logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)
             query = query * logn_tensor.expand_as(query)
 
         if (
@@ -496,49 +379,23 @@ class QWenAttention(nn.Module):
             and query.is_cuda
         ):
             q, k, v = query, key, value
-            attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)
         else:
-            key_size = key[0].size(2) if self.use_cache_quantization else key.size(1)
-            if query.size(1) == key_size:
-                causal_mask = torch.tril(
-                    torch.ones((key_size, key_size), dtype=torch.bool, device=query.device)
-                ).view(1, 1, key_size, key_size)
-            else:
-                causal_mask = None
             query = query.permute(0, 2, 1, 3)
-            if not self.use_cache_quantization:
-                key = key.permute(0, 2, 1, 3)
-                value = value.permute(0, 2, 1, 3)
-            if (
-                causal_mask is None
-                and self.use_flash_attn
-                and flash_attn_unpadded_func is not None
-                and not self.is_fp32
-                and not query.is_cuda
-            ):
-                raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)
-
-            if not self.use_cache_quantization and SUPPORT_TORCH2:
-                if attention_mask is not None:
-                    attention_mask = attention_mask.expand(-1, -1, query.size(2), -1)
-                    if causal_mask is not None:
-                        attention_mask = attention_mask.masked_fill(~causal_mask, torch.finfo(query.dtype).min)
-                else:
-                    attention_mask = causal_mask
-                attn_output = F.scaled_dot_product_attention(
-                    query, key, value, attn_mask=attention_mask
-                ).transpose(1, 2)
-                attn_weight = None
-            else:
-                attn_output, attn_weight = self._attn(
-                    query, key, value, causal_mask, attention_mask, head_mask
-                )
-        context_layer = self._merge_heads(
-            attn_output, self.num_heads, self.head_dim
-        )
 
         attn_output = self.c_proj(context_layer)
-
         outputs = (attn_output, present)
         if output_attentions:
             if (
@@ -547,8 +404,6 @@ class QWenAttention(nn.Module):
                 and not self.is_fp32
             ):
                 raise ValueError("Cannot output attentions while using flash-attn")
-            elif not self.use_cache_quantization and SUPPORT_TORCH2:
-                raise ValueError("Cannot output attentions while using scaled_dot_product_attention")
            else:
                 outputs += (attn_weight,)
 
@@ -559,12 +414,12 @@ class QWenMLP(nn.Module):
     def __init__(self, config):
         super().__init__()
         self.w1 = nn.Linear(
-            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
         )
         self.w2 = nn.Linear(
-            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
         )
-        ff_dim_in = config.intermediate_size // 2
         self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
 
     def forward(self, hidden_states):
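The MLP hunk above only renames the width attribute (`intermediate_size` vs `ffn_hidden_size`); the structure is two half-width projections plus a down projection. A sketch of that shape with a silu-gated forward, which is an assumption here since the forward body sits outside this hunk:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GatedMLP(nn.Module):
    # w1/w2 each map hidden -> intermediate//2, c_proj maps back,
    # matching the Linear layers in the hunk above.
    def __init__(self, hidden_size, intermediate_size):
        super().__init__()
        self.w1 = nn.Linear(hidden_size, intermediate_size // 2, bias=False)
        self.w2 = nn.Linear(hidden_size, intermediate_size // 2, bias=False)
        self.c_proj = nn.Linear(intermediate_size // 2, hidden_size, bias=False)

    def forward(self, x):
        # Gated activation (silu gating assumed for this sketch).
        return self.c_proj(self.w1(x) * F.silu(self.w2(x)))

mlp = GatedMLP(hidden_size=64, intermediate_size=256)
print(mlp(torch.randn(2, 5, 64)).shape)  # torch.Size([2, 5, 64])
```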
@@ -576,16 +431,24 @@
 
 
 class QWenBlock(nn.Module):
-    def __init__(self, config):
         super().__init__()
         hidden_size = config.hidden_size
         self.bf16 = config.bf16
 
         self.ln_1 = RMSNorm(
             hidden_size,
             eps=config.layer_norm_epsilon,
         )
-        self.attn = QWenAttention(config)
         self.ln_2 = RMSNorm(
             hidden_size,
             eps=config.layer_norm_epsilon,
@@ -596,7 +459,6 @@ class QWenBlock(nn.Module):
     def forward(
         self,
         hidden_states: Optional[Tuple[torch.FloatTensor]],
-        rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
         layer_past: Optional[Tuple[torch.Tensor]] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
@@ -609,7 +471,6 @@ class QWenBlock(nn.Module):
 
         attn_outputs = self.attn(
             layernorm_output,
-            rotary_pos_emb_list,
             layer_past=layer_past,
             attention_mask=attention_mask,
             head_mask=head_mask,
@@ -620,12 +481,19 @@ class QWenBlock(nn.Module):
 
         outputs = attn_outputs[1:]
 
-        residual = hidden_states
         layernorm_input = attn_output + residual
 
         layernorm_output = self.ln_2(layernorm_input)
 
-        residual = layernorm_input
         mlp_output = self.mlp(layernorm_output)
         hidden_states = residual + mlp_output
 
@@ -643,7 +511,6 @@ class QWenPreTrainedModel(PreTrainedModel):
     is_parallelizable = False
     supports_gradient_checkpointing = True
     _no_split_modules = ["QWenBlock"]
-    _skip_keys_device_placement = "past_key_values"
 
     def __init__(self, *inputs, **kwargs):
         super().__init__(*inputs, **kwargs)
@@ -667,7 +534,7 @@ class QWenPreTrainedModel(PreTrainedModel):
                 mean=0.0,
                 std=(
                     self.config.initializer_range
-                    / math.sqrt(2 * self.config.num_hidden_layers)
                 ),
             )
 
@@ -681,40 +548,31 @@ class QWenModel(QWenPreTrainedModel):
 
     def __init__(self, config):
         super().__init__(config)
-        self.vocab_size = config.vocab_size
         self.num_hidden_layers = config.num_hidden_layers
         self.embed_dim = config.hidden_size
-        self.use_cache_quantization = self.config.use_cache_quantization if hasattr(self.config, 'use_cache_quantization') else False
 
         self.gradient_checkpointing = False
-        self.use_dynamic_ntk = config.use_dynamic_ntk
-        self.seq_length = config.seq_length
-
-        self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
-
-        self.drop = nn.Dropout(config.emb_dropout_prob)
 
-        if config.rotary_pct == 1.0:
-            self.rotary_ndims = None
         else:
-            assert config.rotary_pct < 1
-            self.rotary_ndims = int(
-                config.kv_channels * config.rotary_pct
-            )
-        dim = (
-            self.rotary_ndims
-            if self.rotary_ndims is not None
-            else config.kv_channels
-        )
-        self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
 
-        self.use_flash_attn = config.use_flash_attn
-        self.is_fp32 = not (config.bf16 or config.fp16)
         self.h = nn.ModuleList(
             [
                 QWenBlock(
-                    config
                 )
                 for i in range(config.num_hidden_layers)
             ]
@@ -732,12 +590,6 @@ class QWenModel(QWenPreTrainedModel):
     def set_input_embeddings(self, new_embeddings):
         self.wte = new_embeddings
 
-    def get_ntk_alpha(self, true_seq_len):
-        context_value = math.log(true_seq_len / self.seq_length, 2) + 1
-        ntk_alpha = 2 ** math.ceil(context_value) - 1
-        ntk_alpha = max(ntk_alpha, 1)
-        return ntk_alpha
-
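The deleted `get_ntk_alpha` above is the dynamic-NTK schedule for RoPE base scaling: it leaves alpha at 1 inside the training context and grows it in 2^k - 1 steps beyond it. A standalone sketch (the 2048-token training length is an assumption for illustration):

```python
import math

def get_ntk_alpha(true_seq_len, train_seq_len=2048):
    # Same arithmetic as the deleted method: widen the RoPE base only once
    # the context exceeds the training length.
    context_value = math.log(true_seq_len / train_seq_len, 2) + 1
    return max(2 ** math.ceil(context_value) - 1, 1)

for n in (1024, 2048, 4096, 8192):
    print(n, get_ntk_alpha(n))  # -> 1, 1, 3, 7
```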
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
@@ -794,10 +646,8 @@ class QWenModel(QWenPreTrainedModel):
             past_length = 0
             past_key_values = tuple([None] * len(self.h))
         else:
-            if self.use_cache_quantization:
-                past_length = past_key_values[0][0][0].size(2)
-            else:
-                past_length = past_key_values[0][0].size(-2)
         if position_ids is None:
             position_ids = torch.arange(
                 past_length,
@@ -816,39 +666,14 @@ class QWenModel(QWenPreTrainedModel):
             attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
 
         encoder_attention_mask = None
-        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
 
         if inputs_embeds is None:
             inputs_embeds = self.wte(input_ids)
         hidden_states = inputs_embeds
-
-        kv_seq_len = hidden_states.size()[1]
-        if past_key_values[0] is not None:
-            # past key values[0][0] shape: bs * seq_len * head_num * dim
-            if self.use_cache_quantization:
-                kv_seq_len += past_key_values[0][0][0].shape[2]
-            else:
-                kv_seq_len += past_key_values[0][0].shape[1]
-
-        if self.training or not self.use_dynamic_ntk:
-            ntk_alpha_list = [1.0]
-        elif kv_seq_len != hidden_states.size()[1]:
-            ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list
-        else:
-            ntk_alpha_list = []
-            if attention_mask is not None and kv_seq_len > self.seq_length:
-                true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)
-                for i in range(hidden_states.size()[0]):
-                    true_seq_len = true_seq_lens[i].item()
-                    ntk_alpha = self.get_ntk_alpha(true_seq_len)
-                    ntk_alpha_list.append(ntk_alpha)
-            else:
-                ntk_alpha = self.get_ntk_alpha(kv_seq_len)
-                ntk_alpha_list.append(ntk_alpha)
-        self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list
-        rotary_pos_emb_list = [
-            self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list
-        ]
 
         hidden_states = self.drop(hidden_states)
         output_shape = input_shape + (hidden_states.size(-1),)
@@ -880,7 +705,6 @@ class QWenModel(QWenPreTrainedModel):
                 outputs = torch.utils.checkpoint.checkpoint(
                     create_custom_forward(block),
                     hidden_states,
-                    rotary_pos_emb_list,
                     None,
                     attention_mask,
                     head_mask[i],
@@ -891,7 +715,6 @@ class QWenModel(QWenPreTrainedModel):
                 outputs = block(
                     hidden_states,
                     layer_past=layer_past,
-                    rotary_pos_emb_list=rotary_pos_emb_list,
                     attention_mask=attention_mask,
                     head_mask=head_mask[i],
                     encoder_hidden_states=encoder_hidden_states,
@@ -902,16 +725,13 @@ class QWenModel(QWenPreTrainedModel):
 
         hidden_states = outputs[0]
         if use_cache is True:
-            presents = presents + (outputs[1],)
 
         if output_attentions:
-            all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
 
         hidden_states = self.ln_f(hidden_states)
         hidden_states = hidden_states.view(output_shape)
-        # Add last hidden state
-        if output_hidden_states:
-            all_hidden_states = all_hidden_states + (hidden_states,)
 
         if not return_dict:
             return tuple(
@@ -963,7 +783,7 @@ class QWenLMHeadModel(QWenPreTrainedModel):
             logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
         elif SUPPORT_FP16:
             logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
-
         if config.use_flash_attn == "auto":
             if config.bf16 or config.fp16:
                 logger.warn("Try importing flash-attention for faster inference...")
@@ -974,10 +794,36 @@ class QWenLMHeadModel(QWenPreTrainedModel):
                 logger.warn("Flash attention will be disabled because it does NOT support fp32.")
 
         if config.use_flash_attn:
-            _import_flash_attn()
 
         self.transformer = QWenModel(config)
-        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
 
         if config.bf16:
             self.transformer.bfloat16()
@@ -996,13 +842,22 @@ class QWenLMHeadModel(QWenPreTrainedModel):
     def prepare_inputs_for_generation(
         self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
     ):
         if past_key_values:
             input_ids = input_ids[:, -1].unsqueeze(-1)
 
-        if input_ids.size(0) == 1:
-            attention_mask = None
         else:
-            attention_mask = kwargs.get("attention_mask", None)
 
         if inputs_embeds is not None and past_key_values is None:
             model_inputs = {"inputs_embeds": inputs_embeds}
@@ -1013,7 +868,9 @@ class QWenLMHeadModel(QWenPreTrainedModel):
             {
                 "past_key_values": past_key_values,
                 "use_cache": kwargs.get("use_cache"),
                 "attention_mask": attention_mask,
             }
         )
         return model_inputs
@@ -1100,129 +957,67 @@ class QWenLMHeadModel(QWenPreTrainedModel):
         query: str,
         history: Optional[HistoryType],
         system: str = "You are a helpful assistant.",
-        stream: Optional[bool] = _SENTINEL,
         stop_words_ids: Optional[List[List[int]]] = None,
-        generation_config: Optional[GenerationConfig] = None,
         **kwargs,
     ) -> Tuple[str, HistoryType]:
-        generation_config = generation_config if generation_config is not None else self.generation_config
-
-        assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
-        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
         if history is None:
             history = []
-        else:
-            # make a copy of the user's input such that it is left untouched
-            history = copy.deepcopy(history)
-
         if stop_words_ids is None:
             stop_words_ids = []
 
-        max_window_size = kwargs.get('max_window_size', None)
-        if max_window_size is None:
-            max_window_size = generation_config.max_window_size
         raw_text, context_tokens = make_context(
             tokenizer,
             query,
             history=history,
             system=system,
-            max_window_size=max_window_size,
-            chat_format=generation_config.chat_format,
         )
 
         stop_words_ids.extend(get_stop_words_ids(
-            generation_config.chat_format, tokenizer
         ))
         input_ids = torch.tensor([context_tokens]).to(self.device)
-        outputs = self.generate(
-            input_ids,
-            stop_words_ids=stop_words_ids,
-            return_dict_in_generate=False,
-            generation_config=generation_config,
-            **kwargs,
-        )
-
-        response = decode_tokens(
-            outputs[0],
-            tokenizer,
-            raw_text_len=len(raw_text),
-            context_length=len(context_tokens),
-            chat_format=generation_config.chat_format,
-            verbose=False,
-            errors='replace'
-        )
 
-        # as history is a copy of the user inputs,
-        # we can always return the new turn to the user.
-        # separating input history and output history also enables the user
-        # to implement more complex history management
-        history.append((query, response))
 
         return response, history
 
-    def chat_stream(
-        self,
-        tokenizer: PreTrainedTokenizer,
-        query: str,
-        history: Optional[HistoryType],
-        system: str = "You are a helpful assistant.",
-        stop_words_ids: Optional[List[List[int]]] = None,
-        logits_processor: Optional[LogitsProcessorList] = None,
-        generation_config: Optional[GenerationConfig] = None,
-        **kwargs,
-    ) -> Generator[str, Any, None]:
-        generation_config = generation_config if generation_config is not None else self.generation_config
-        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
-        if history is None:
-            history = []
-        if stop_words_ids is None:
-            stop_words_ids = []
-
-        max_window_size = kwargs.get('max_window_size', None)
-        if max_window_size is None:
-            max_window_size = generation_config.max_window_size
-        raw_text, context_tokens = make_context(
-            tokenizer,
-            query,
-            history=history,
-            system=system,
-            max_window_size=max_window_size,
-            chat_format=generation_config.chat_format,
-        )
-
-        stop_words_ids.extend(get_stop_words_ids(
-            generation_config.chat_format, tokenizer
-        ))
-        if stop_words_ids is not None:
-            stop_words_logits_processor = StopWordsLogitsProcessor(
-                stop_words_ids=stop_words_ids,
-                eos_token_id=generation_config.eos_token_id,
-            )
-            if logits_processor is None:
-                logits_processor = LogitsProcessorList([stop_words_logits_processor])
-            else:
-                logits_processor.append(stop_words_logits_processor)
-        input_ids = torch.tensor([context_tokens]).to(self.device)
-
-        from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
-        self.__class__.generate_stream = NewGenerationMixin.generate
-        self.__class__.sample_stream = NewGenerationMixin.sample_stream
-        stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
-
-        def stream_generator():
-            outputs = []
-            for token in self.generate_stream(
-                    input_ids,
-                    return_dict_in_generate=False,
-                    generation_config=stream_config,
-                    logits_processor=logits_processor,
-                    seed=-1,
-                    **kwargs):
-                outputs.append(token.item())
-                yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')
-
-        return stream_generator()
-
     def generate(
         self,
         inputs: Optional[torch.Tensor] = None,
@@ -1233,23 +1028,20 @@ class QWenLMHeadModel(QWenPreTrainedModel):
             Callable[[int, torch.Tensor], List[int]]
         ] = None,
         synced_gpus: Optional[bool] = None,
-        assistant_model: Optional["PreTrainedModel"] = None,
         streamer: Optional["BaseStreamer"] = None,
         **kwargs,
     ) -> Union[GenerateOutput, torch.LongTensor]:
-        generation_config = generation_config if generation_config is not None else self.generation_config
-
         # Process stop_words_ids.
         stop_words_ids = kwargs.pop("stop_words_ids", None)
         if stop_words_ids is None and generation_config is not None:
             stop_words_ids = getattr(generation_config, "stop_words_ids", None)
         if stop_words_ids is None:
-            stop_words_ids = getattr(generation_config, "stop_words_ids", None)
 
         if stop_words_ids is not None:
             stop_words_logits_processor = StopWordsLogitsProcessor(
                 stop_words_ids=stop_words_ids,
-                eos_token_id=generation_config.eos_token_id,
            )
             if logits_processor is None:
                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
@@ -1258,13 +1050,12 @@ class QWenLMHeadModel(QWenPreTrainedModel):
 
         return super().generate(
             inputs,
-            generation_config=generation_config,
-            logits_processor=logits_processor,
-            stopping_criteria=stopping_criteria,
-            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
-            synced_gpus=synced_gpus,
-            assistant_model=assistant_model,
-            streamer=streamer,
             **kwargs,
         )
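The stop-word handling above wraps the repo's `StopWordsLogitsProcessor` (from `qwen_generation_utils`) in a `LogitsProcessorList` before delegating to HF `generate()`. The processor protocol itself is simple; a toy stand-in that bans one token id shows the mechanics:

```python
import torch
from transformers import LogitsProcessor, LogitsProcessorList

class BanTokenProcessor(LogitsProcessor):
    """Toy stand-in for StopWordsLogitsProcessor: force one id to -inf."""
    def __init__(self, banned_id):
        self.banned_id = banned_id

    def __call__(self, input_ids, scores):
        scores[:, self.banned_id] = float("-inf")
        return scores

procs = LogitsProcessorList([BanTokenProcessor(banned_id=3)])
scores = procs(torch.tensor([[1, 2]]), torch.randn(1, 10))
print(scores[0, 3])  # tensor(-inf); generate() would then never sample id 3
```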
 
@@ -1274,17 +1065,16 @@ class RotaryEmbedding(torch.nn.Module):
         super().__init__()
         self.dim = dim
         self.base = base
-        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
-        self.register_buffer("inv_freq", inv_freq, persistent=False)
         if importlib.util.find_spec("einops") is None:
             raise RuntimeError("einops is required for Rotary Embedding")
 
         self._rotary_pos_emb_cache = None
         self._seq_len_cached = 0
         self._ntk_alpha_cached = 1.0
-        self._ntk_alpha_cached_list = [1.0]
 
-    def update_rotary_pos_emb_cache(self, seqlen, ntk_alpha=1.0):
         if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
             base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
             self.inv_freq = 1.0 / (
@@ -1294,23 +1084,18 @@ class RotaryEmbedding(torch.nn.Module):
                     / self.dim
                 )
             )
-            self._seq_len_cached = max(2 * seqlen, 16)
             self._ntk_alpha_cached = ntk_alpha
-            seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
             freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
-
             emb = torch.cat((freqs, freqs), dim=-1)
             from einops import rearrange
 
-            emb = rearrange(emb, "n d -> 1 n 1 d")
-
-            cos, sin = emb.cos(), emb.sin()
-            self._rotary_pos_emb_cache = [cos, sin]
 
-    def forward(self, max_seq_len, ntk_alpha=1.0):
-        self.update_rotary_pos_emb_cache(max_seq_len, ntk_alpha)
-        cos, sin = self._rotary_pos_emb_cache
-        return [cos[:, :max_seq_len], sin[:, :max_seq_len]]
 
 
 def _rotate_half(x):
@@ -1322,28 +1107,20 @@ def _rotate_half(x):
 
 
 def apply_rotary_pos_emb(t, freqs):
-    """ Apply rotary embedding to the first rotary_dim of the input
-
-    Arguments:
-      t (tensor(batch_size, seq_len, n_head, head_dim)):
-        the input embedding/hidden states
-      freqs (list[tensor(1, seq_len, 1, rotary_dim), tensor(1, seq_len, 1, rotary_dim)]):
-        the cached cos/sin position embeddings
-    """
-    rot_dim = freqs[0].shape[-1]
-    cos, sin = freqs
-    t_float = t.float()
-    if apply_rotary_emb_func is not None and t.is_cuda:
-        # apply_rotary_emb in flash_attn requires cos/sin to be of
-        # shape (seqlen, rotary_dim / 2) and apply rotary embedding
-        # to the first rotary_dim of the input
-        cos = cos.squeeze(0).squeeze(1)[:, : rot_dim // 2]
-        sin = sin.squeeze(0).squeeze(1)[:, : rot_dim // 2]
-        return apply_rotary_emb_func(t_float, cos, sin).type_as(t)
     else:
-        t_rot, t_pass = t_float[..., :rot_dim], t_float[..., rot_dim:]
-        t_rot = (t_rot * cos) + (_rotate_half(t_rot) * sin)
-        return torch.cat((t_rot, t_pass), dim=-1).type_as(t)
 
 
 class RMSNorm(torch.nn.Module):
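The non-fused fallback in `apply_rotary_pos_emb` above rotates only the first `rot_dim` channels and passes the rest through. A self-contained sketch of that path (toy shapes, base 10000 as in the class default):

```python
import torch

def rotate_half(x):
    # Same trick as _rotate_half: split the last dim in two, swap with a sign flip.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rope(t, freqs):
    # Rotate the first rot_dim channels; pass the remainder through untouched.
    rot_dim = freqs.shape[-1]
    t_rot, t_pass = t[..., :rot_dim], t[..., rot_dim:]
    t_rot = t_rot * freqs.cos() + rotate_half(t_rot) * freqs.sin()
    return torch.cat((t_rot, t_pass), dim=-1)

seq, dim = 6, 8
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
freqs = torch.outer(torch.arange(seq).float(), inv_freq)
freqs = torch.cat((freqs, freqs), dim=-1)[None, :, None, :]  # (1, seq, 1, dim)
t = torch.randn(1, seq, 2, dim)                              # (b, seq, heads, dim)
print(apply_rope(t, freqs).shape)  # torch.Size([1, 6, 2, 8])
```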
 
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.
 
 import importlib
 import math
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List
 
 import torch
 import torch.nn.functional as F
 import torch.utils.checkpoint
+from torch.cuda.amp import autocast
 
 from torch.nn import CrossEntropyLoss
 from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
 
 SUPPORT_CUDA = torch.cuda.is_available()
 SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
 SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
 
+apply_rotary_emb_func = None
+rms_norm = None
+flash_attn_unpadded_func = None
 
 from .configuration_qwen import QWenConfig
 from .qwen_generation_utils import (
 
 QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
 class FlashSelfAttention(torch.nn.Module):
     def __init__(
         self,
 
         self.softmax_scale = softmax_scale
         self.dropout_p = attention_dropout
 
+    def forward(self, q, k, v):
         assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
         assert all((i.is_cuda for i in (q, k, v)))
         batch_size, seqlen_q = q.shape[0], q.shape[1]
         seqlen_k = k.shape[1]
 
         q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
         cu_seqlens_q = torch.arange(
             0,
 
             device=q.device,
         )
 
+        if self.training:
+            assert seqlen_k == seqlen_q
+
+            is_causal = self.causal
+            cu_seqlens_k = cu_seqlens_q
         else:
+            is_causal = seqlen_q == seqlen_k
             cu_seqlens_k = torch.arange(
                 0,
                 (batch_size + 1) * seqlen_k,
 
                 dtype=torch.int32,
                 device=q.device,
             )
+            self.dropout_p = 0
 
         output = flash_attn_unpadded_func(
             q,
             k,
 
             cu_seqlens_k,
             seqlen_q,
             seqlen_k,
+            self.dropout_p,
             softmax_scale=self.softmax_scale,
             causal=is_causal,
         )
+
+        output = rearrange(output, "(b s) ... -> b s ...", b=batch_size)
         return output
 
 
 class QWenAttention(nn.Module):
+    def __init__(self, config, layer_number=None):
         super().__init__()
 
+        max_positions = config.max_position_embeddings
+        self.register_buffer(
+            "bias",
+            torch.tril(
+                torch.ones((max_positions, max_positions), dtype=torch.bool)
+            ).view(1, 1, max_positions, max_positions),
+            persistent=False,
+        )
         self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+        self.layer_number = max(1, layer_number)
+        self.params_dtype = config.params_dtype
         self.seq_length = config.seq_length
 
         self.hidden_size = config.hidden_size
 
         self.use_flash_attn = config.use_flash_attn
         self.scale_attn_weights = True
 
+        self.layer_idx = None
+
         self.projection_size = config.kv_channels * config.num_attention_heads
 
         assert self.projection_size % config.num_attention_heads == 0
 
             and not self.is_fp32
         ):
             self.core_attention_flash = FlashSelfAttention(
+                causal=True, attention_dropout=config.attn_pdrop
             )
+
         self.bf16 = config.bf16
 
+        if config.rotary_pct == 1.0:
+            self.rotary_ndims = None
+        else:
+            assert config.rotary_pct < 1
+            self.rotary_ndims = int(
+                self.hidden_size_per_attention_head * config.rotary_pct
+            )
+        dim = (
+            self.rotary_ndims
+            if self.rotary_ndims is not None
+            else self.hidden_size_per_attention_head
+        )
+        self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
+
         self.use_dynamic_ntk = config.use_dynamic_ntk
         self.use_logn_attn = config.use_logn_attn
 
             math.log(i, self.seq_length) if i > self.seq_length else 1
             for i in range(1, 32768)
         ]
+        self.logn_tensor = torch.Tensor(logn_list)[None, :, None, None]
+        self._ntk_cached = 1.0
+
+        self.attn_dropout = nn.Dropout(config.attn_pdrop)
+
+    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
+        attn_weights = torch.matmul(query, key.transpose(-1, -2))
 
         if self.scale_attn_weights:
+            attn_weights = attn_weights / torch.full(
+                [],
+                value.size(-1) ** 0.5,
+                dtype=attn_weights.dtype,
+                device=attn_weights.device,
+            )
 
+        query_length, key_length = query.size(-2), key.size(-2)
+        causal_mask = self.bias[
+            :, :, key_length - query_length : key_length, :key_length
+        ]
         mask_value = torch.finfo(attn_weights.dtype).min
+        mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
+            attn_weights.device
+        )
+        attn_weights = torch.where(
+            causal_mask, attn_weights.to(attn_weights.dtype), mask_value
+        )
+
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+        attn_weights = attn_weights.type(value.dtype)
+        attn_weights = self.attn_dropout(attn_weights)
+
+        if head_mask is not None:
+            attn_weights = attn_weights * head_mask
+
+        attn_output = torch.matmul(attn_weights, value)
+        attn_output = attn_output.transpose(1, 2)
+
+        return attn_output, attn_weights
+
+    def _upcast_and_reordered_attn(
+        self, query, key, value, attention_mask=None, head_mask=None
+    ):
+        bsz, num_heads, q_seq_len, dk = query.size()
+        _, _, k_seq_len, _ = key.size()
+
+        attn_weights = torch.empty(
+            bsz * num_heads,
+            q_seq_len,
+            k_seq_len,
+            dtype=torch.float32,
+            device=query.device,
+        )
+
+        scale_factor = 1.0
+        if self.scale_attn_weights:
+            scale_factor /= float(value.size(-1)) ** 0.5
+
+        with autocast(enabled=False):
+            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
+                -1, dk, k_seq_len
+            )
+            attn_weights = torch.baddbmm(
+                attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
+            )
+            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+        query_length, key_length = query.size(-2), key.size(-2)
+        causal_mask = self.bias[
+            :, :, key_length - query_length : key_length, :key_length
+        ]
+        mask_value = torch.finfo(attn_weights.dtype).min
+        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
+            attn_weights.device
+        )
+        attn_weights = torch.where(causal_mask, attn_weights, mask_value)
 
         if attention_mask is not None:
             attn_weights = attn_weights + attention_mask
 
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
 
+        if attn_weights.dtype != torch.float32:
+            raise RuntimeError(
+                "Error with upcasting, attn_weights does not have dtype torch.float32"
+            )
+        attn_weights = attn_weights.type(value.dtype)
         attn_weights = self.attn_dropout(attn_weights)
 
         if head_mask is not None:
             attn_weights = attn_weights * head_mask
 
+        attn_output = torch.matmul(attn_weights, value)
 
         return attn_output, attn_weights
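The `bias` buffer registered in `__init__` above is the classic GPT-2-style precomputed causal mask: a lower-triangular boolean of shape `(1, 1, max_positions, max_positions)` that `_attn` slices down to the current window, offset so that cached keys stay visible to new queries. A quick demonstration of the slicing:

```python
import torch

max_positions = 8
bias = torch.tril(torch.ones(max_positions, max_positions, dtype=torch.bool)).view(
    1, 1, max_positions, max_positions
)
# e.g. 2 new query tokens attending over 6 keys (4 of them from the KV cache)
query_length, key_length = 2, 6
causal_mask = bias[:, :, key_length - query_length : key_length, :key_length]
print(causal_mask[0, 0].int())
# tensor([[1, 1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1, 1]], dtype=torch.int32)
```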
 
     def forward(
         self,
         hidden_states: Optional[Tuple[torch.FloatTensor]],
         layer_past: Optional[Tuple[torch.Tensor]] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
 
         output_attentions: Optional[bool] = False,
         use_cache: Optional[bool] = False,
     ):
+        mixed_x_layer = self.c_attn(hidden_states)
         query, key, value = mixed_x_layer.split(self.split_size, dim=2)
 
         query = self._split_heads(query, self.num_heads, self.head_dim)
         key = self._split_heads(key, self.num_heads, self.head_dim)
         value = self._split_heads(value, self.num_heads, self.head_dim)
 
+        kv_seq_len = hidden_states.size()[1]
+        if layer_past:
+            # layer_past[0] shape: bs * seq_len * head_num * dim
+            kv_seq_len += layer_past[0].shape[1]
+        if (
+            self.use_dynamic_ntk
+            and kv_seq_len == hidden_states.size()[1]
+            and not self.training
+        ):
+            context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
+            ntk_alpha = 2 ** math.ceil(context_value) - 1
+            ntk_alpha = max(ntk_alpha, 1)
+            self._ntk_cached = ntk_alpha
+        else:
+            ntk_alpha = self._ntk_cached
+        rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha).to(
+            hidden_states.device
+        )
+
+        if rotary_pos_emb is not None:
+            if isinstance(rotary_pos_emb, tuple):
+                rotary_pos_emb = rotary_pos_emb
             else:
+                rotary_pos_emb = (rotary_pos_emb,) * 2
 
+        if rotary_pos_emb is not None:
+            q_pos_emb, k_pos_emb = rotary_pos_emb
+            # Slice the pos emb for current inference
+            cur_len = query.shape[1]
+            q_pos_emb = q_pos_emb[:, -cur_len:, :, :]
+            k_pos_emb = k_pos_emb[:, -cur_len:, :, :]
+            query = apply_rotary_pos_emb(query, q_pos_emb)
+            key = apply_rotary_pos_emb(key, k_pos_emb)
 
         if layer_past is not None:
             past_key, past_value = layer_past[0], layer_past[1]
+            key = torch.cat((past_key, key), dim=1)
+            value = torch.cat((past_value, value), dim=1)
 
         if use_cache:
             present = (key, value)
         else:
             present = None
 
+        if self.use_logn_attn and not self.training:
+            if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype:
+                self.logn_tensor = self.logn_tensor.to(query.device).type_as(query)
+            seq_start = key.size(1) - query.size(1)
+            seq_end = key.size(1)
+            logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
             query = query * logn_tensor.expand_as(query)
 
         if (
 
             and query.is_cuda
         ):
             q, k, v = query, key, value
+            context_layer = self.core_attention_flash(q, k, v)
+
+            context_layer = rearrange(
+                context_layer, "b s h d -> b s (h d)"
+            ).contiguous()
         else:
             query = query.permute(0, 2, 1, 3)
+            key = key.permute(0, 2, 1, 3)
+            value = value.permute(0, 2, 1, 3)
+            attn_output, attn_weight = self._attn(
+                query, key, value, attention_mask, head_mask
+            )
+            context_layer = self._merge_heads(
+                attn_output, self.num_heads, self.head_dim
+            )
 
         attn_output = self.c_proj(context_layer)
         outputs = (attn_output, present)
         if output_attentions:
             if (
 
                 and not self.is_fp32
             ):
                 raise ValueError("Cannot output attentions while using flash-attn")
             else:
                 outputs += (attn_weight,)
 
     def __init__(self, config):
         super().__init__()
         self.w1 = nn.Linear(
+            config.hidden_size, config.ffn_hidden_size // 2, bias=not config.no_bias
         )
         self.w2 = nn.Linear(
+            config.hidden_size, config.ffn_hidden_size // 2, bias=not config.no_bias
         )
+        ff_dim_in = config.ffn_hidden_size // 2
         self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
 
     def forward(self, hidden_states):
 
 class QWenBlock(nn.Module):
+    def __init__(self, config, layer_idx=None, num_expert=1):
         super().__init__()
+        self.num_expert = num_expert
+        self.layer_number = layer_idx
+        self.apply_residual_connection_post_layernorm = (
+            config.apply_residual_connection_post_layernorm
+        )
         hidden_size = config.hidden_size
+        self.apply_residual_connection_post_layernorm = (
+            config.apply_residual_connection_post_layernorm
+        )
         self.bf16 = config.bf16
 
         self.ln_1 = RMSNorm(
             hidden_size,
             eps=config.layer_norm_epsilon,
         )
+        self.attn = QWenAttention(config, layer_number=layer_idx)
         self.ln_2 = RMSNorm(
             hidden_size,
             eps=config.layer_norm_epsilon,
 
     def forward(
         self,
         hidden_states: Optional[Tuple[torch.FloatTensor]],
         layer_past: Optional[Tuple[torch.Tensor]] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
 
         attn_outputs = self.attn(
             layernorm_output,
             layer_past=layer_past,
             attention_mask=attention_mask,
             head_mask=head_mask,
 
         outputs = attn_outputs[1:]
 
+        if self.apply_residual_connection_post_layernorm:
+            residual = layernorm_output
+        else:
+            residual = hidden_states
         layernorm_input = attn_output + residual
 
         layernorm_output = self.ln_2(layernorm_input)
 
+        if self.apply_residual_connection_post_layernorm:
+            residual = layernorm_output
+        else:
+            residual = layernorm_input
+
         mlp_output = self.mlp(layernorm_output)
         hidden_states = residual + mlp_output
 
     is_parallelizable = False
     supports_gradient_checkpointing = True
     _no_split_modules = ["QWenBlock"]
 
     def __init__(self, *inputs, **kwargs):
         super().__init__(*inputs, **kwargs)
 
                 mean=0.0,
                 std=(
                     self.config.initializer_range
+                    / math.sqrt(2 * self.config.n_layer)
                 ),
             )
 
     def __init__(self, config):
         super().__init__(config)
+        self.vocab_size = config.padded_vocab_size
         self.num_hidden_layers = config.num_hidden_layers
         self.embed_dim = config.hidden_size
 
+        max_sequence_length = config.max_position_embeddings
+        self.position_embedding_type = config.pos_emb
         self.gradient_checkpointing = False
 
+        if self.position_embedding_type == "learned":
+            self.wpe = nn.Embedding(max_sequence_length, self.embed_dim)
+            self.init_method(self.position_embeddings.weight)
+            self._position_embeddings_key = "position_embeddings"
+            self.init_method(self.position_embeddings.weight)
         else:
+            self.wpe = None
+            self._position_embeddings_key = ""
 
+        self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
 
+        self.drop = nn.Dropout(config.embd_pdrop)
         self.h = nn.ModuleList(
             [
                 QWenBlock(
+                    config,
+                    layer_idx=i,
                 )
                 for i in range(config.num_hidden_layers)
             ]
 
     def set_input_embeddings(self, new_embeddings):
         self.wte = new_embeddings
 
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
 
             past_length = 0
             past_key_values = tuple([None] * len(self.h))
         else:
+            past_length = past_key_values[0][0].size(-2)
+
         if position_ids is None:
             position_ids = torch.arange(
                 past_length,
 
             attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
 
         encoder_attention_mask = None
+        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
 
         if inputs_embeds is None:
             inputs_embeds = self.wte(input_ids)
         hidden_states = inputs_embeds
+        if self.wpe is not None:
+            position_embeds = self.wpe(position_ids)
+            hidden_states = hidden_states + position_embeds
 
         hidden_states = self.drop(hidden_states)
         output_shape = input_shape + (hidden_states.size(-1),)
 
                 outputs = torch.utils.checkpoint.checkpoint(
                     create_custom_forward(block),
                     hidden_states,
                     None,
                     attention_mask,
                     head_mask[i],
 
                 outputs = block(
                     hidden_states,
                     layer_past=layer_past,
                     attention_mask=attention_mask,
                     head_mask=head_mask[i],
                     encoder_hidden_states=encoder_hidden_states,
 
         hidden_states = outputs[0]
         if use_cache is True:
+            presents = presents + (outputs[2 if output_attentions else 1],)
 
         if output_attentions:
+            all_self_attentions = all_self_attentions + (outputs[1],)
 
         hidden_states = self.ln_f(hidden_states)
         hidden_states = hidden_states.view(output_shape)
 
         if not return_dict:
             return tuple(
 
             logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
         elif SUPPORT_FP16:
             logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
+
         if config.use_flash_attn == "auto":
             if config.bf16 or config.fp16:
                 logger.warn("Try importing flash-attention for faster inference...")
 
                 logger.warn("Flash attention will be disabled because it does NOT support fp32.")
 
         if config.use_flash_attn:
+            global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func
+            try:
+                from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
+                apply_rotary_emb_func = __apply_rotary_emb_func
+            except ImportError:
+                logger.warn(
+                    "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
+                    "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
+                )
+
+            try:
+                from flash_attn.ops.rms_norm import rms_norm as __rms_norm
+                rms_norm = __rms_norm
+            except ImportError:
+                logger.warn(
+                    "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
+                    "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
+                )
+
+            try:
+                from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
+                flash_attn_unpadded_func = __flash_attn_unpadded_func
+            except ImportError:
+                logger.warn(
+                    "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
+                    "https://github.com/Dao-AILab/flash-attention"
+                )
 
         self.transformer = QWenModel(config)
+        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
 
         if config.bf16:
             self.transformer.bfloat16()
 
     def prepare_inputs_for_generation(
         self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
     ):
+        token_type_ids = kwargs.get("token_type_ids", None)
         if past_key_values:
             input_ids = input_ids[:, -1].unsqueeze(-1)
+            if token_type_ids is not None:
+                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+        attention_mask = kwargs.get("attention_mask", None)
+        position_ids = kwargs.get("position_ids", None)
 
+        if attention_mask is not None and position_ids is None:
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past_key_values:
+                position_ids = position_ids[:, -1].unsqueeze(-1)
         else:
+            position_ids = None
 
         if inputs_embeds is not None and past_key_values is None:
             model_inputs = {"inputs_embeds": inputs_embeds}
 
             {
                 "past_key_values": past_key_values,
                 "use_cache": kwargs.get("use_cache"),
+                "position_ids": position_ids,
                 "attention_mask": attention_mask,
+                "token_type_ids": token_type_ids,
             }
         )
         return model_inputs
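The cumsum trick restored above recovers position ids from the attention mask so that left-padded batches still start every real sequence at position 0; the fill value 1 for padded slots is a harmless dummy. In isolation:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # left-padded example row
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)  # tensor([[1, 1, 0, 1, 2]]): real tokens get 0, 1, 2
```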
 
         query: str,
         history: Optional[HistoryType],
         system: str = "You are a helpful assistant.",
+        append_history: bool = True,
+        stream: Optional[bool] = False,
         stop_words_ids: Optional[List[List[int]]] = None,
         **kwargs,
     ) -> Tuple[str, HistoryType]:
         if history is None:
             history = []
         if stop_words_ids is None:
             stop_words_ids = []
 
         raw_text, context_tokens = make_context(
             tokenizer,
             query,
             history=history,
             system=system,
+            max_window_size=6144,
+            chat_format=self.generation_config.chat_format,
         )
 
         stop_words_ids.extend(get_stop_words_ids(
+            self.generation_config.chat_format, tokenizer
         ))
         input_ids = torch.tensor([context_tokens]).to(self.device)
+        if stream:
+            assert self.generation_config.chat_format == 'chatml'
+            from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
+            self.__class__.generate = NewGenerationMixin.generate
+            self.__class__.sample_stream = NewGenerationMixin.sample_stream
+            stream_config = StreamGenerationConfig(**self.generation_config.to_dict(), do_stream=True)
+            def stream_generator():
+                outputs = []
+                for token in self.generate(
+                        input_ids, return_dict_in_generate=False, generation_config=stream_config, **kwargs):
+                    outputs.append(token.item())
+                    if outputs[-1] in (tokenizer.im_end_id, tokenizer.im_start_id):
+                        break
+                    yield tokenizer.decode(outputs, skip_special_tokens=True)
+
+            return stream_generator()
+        else:
+            outputs = self.generate(
+                input_ids,
+                stop_words_ids=stop_words_ids,
+                return_dict_in_generate=False,
+                **kwargs,
+            )
+
+            response = decode_tokens(
+                outputs[0],
+                tokenizer,
+                raw_text_len=len(raw_text),
+                context_length=len(context_tokens),
+                chat_format=self.generation_config.chat_format,
+                verbose=False,
+            )
 
+        if append_history:
+            history.append((query, response))
 
         return response, history
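For reference, typical use of the `chat()` signature restored above, including the `stream` flag; a sketch (the `Qwen/Qwen-7B-Chat` repo id and available weights are assumptions):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat", trust_remote_code=True
).eval()

# Blocking call: returns the full response plus the updated history.
response, history = model.chat(tokenizer, "Hello!", history=None)
print(response)

# stream=True instead returns a generator of incrementally decoded text.
for chunk in model.chat(tokenizer, "Tell me a story.", history=history, stream=True):
    print(chunk)
```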
 
     def generate(
         self,
         inputs: Optional[torch.Tensor] = None,
 
             Callable[[int, torch.Tensor], List[int]]
         ] = None,
         synced_gpus: Optional[bool] = None,
         streamer: Optional["BaseStreamer"] = None,
         **kwargs,
     ) -> Union[GenerateOutput, torch.LongTensor]:
         # Process stop_words_ids.
         stop_words_ids = kwargs.pop("stop_words_ids", None)
         if stop_words_ids is None and generation_config is not None:
             stop_words_ids = getattr(generation_config, "stop_words_ids", None)
         if stop_words_ids is None:
+            stop_words_ids = getattr(self.generation_config, "stop_words_ids", None)
 
         if stop_words_ids is not None:
             stop_words_logits_processor = StopWordsLogitsProcessor(
                 stop_words_ids=stop_words_ids,
+                eos_token_id=self.generation_config.eos_token_id,
             )
             if logits_processor is None:
                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
 
         return super().generate(
             inputs,
+            generation_config,
+            logits_processor,
+            stopping_criteria,
+            prefix_allowed_tokens_fn,
+            synced_gpus,
+            streamer,
             **kwargs,
         )
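This is the change the PR title points at: when a caller passes no `generation_config`, the `getattr` fallback above now reads `stop_words_ids` and `eos_token_id` off `self.generation_config`, the default that HF's `GenerationMixin.generate` also falls back to (loaded from the repo's generation_config.json). Qwen's extra fields are just plain attributes on a `GenerationConfig`; a sketch with hypothetical values:

```python
from transformers import GenerationConfig

# GenerationConfig keeps unknown kwargs as attributes, which is how custom
# fields like chat_format and stop_words_ids ride along with the config.
gen_config = GenerationConfig(
    chat_format="chatml",
    max_window_size=6144,
    stop_words_ids=[[151645]],  # hypothetical token ids
)
print(getattr(gen_config, "stop_words_ids", None))  # [[151645]]
```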
 
         super().__init__()
         self.dim = dim
         self.base = base
+        self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
         if importlib.util.find_spec("einops") is None:
             raise RuntimeError("einops is required for Rotary Embedding")
 
         self._rotary_pos_emb_cache = None
         self._seq_len_cached = 0
         self._ntk_alpha_cached = 1.0
 
+    def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
+        seqlen = max_seq_len + offset
         if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
             base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
             self.inv_freq = 1.0 / (
 
                     / self.dim
                 )
             )
+            self._seq_len_cached = seqlen
             self._ntk_alpha_cached = ntk_alpha
+            seq = torch.arange(seqlen, device=self.inv_freq.device)
             freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
             emb = torch.cat((freqs, freqs), dim=-1)
             from einops import rearrange
 
+            self._rotary_pos_emb_cache = rearrange(emb, "n d -> 1 n 1 d")
 
+    def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
+        self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
+        return self._rotary_pos_emb_cache[:, offset : offset + max_seq_len]
 
 
 def _rotate_half(x):
 
 
 def apply_rotary_pos_emb(t, freqs):
+    if apply_rotary_emb_func is not None:
+        t_ = t.float()
+        freqs = freqs.squeeze(0).squeeze(1)
+        cos = freqs[:, : freqs.shape[-1] // 2].cos()
+        sin = freqs[:, : freqs.shape[-1] // 2].sin()
+        output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
+        return output
     else:
+        rot_dim = freqs.shape[-1]
+        t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
+        t_ = t_.float()
+        t_pass_ = t_pass_.float()
+        t_ = (t_ * freqs.cos()) + (_rotate_half(t_) * freqs.sin())
+        return torch.cat((t_, t_pass_), dim=-1).type_as(t)
 
 
 class RMSNorm(torch.nn.Module):
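The RMSNorm class header above closes the visible part of this diff; its body is not shown. For orientation, a generic sketch of the RMSNorm computation such a class implements (the 1e-6 epsilon is an assumption):

```python
import torch
import torch.nn as nn

class SimpleRMSNorm(nn.Module):
    # Normalize by the root-mean-square of the features, then apply a
    # learned per-channel gain; no mean subtraction, unlike LayerNorm.
    def __init__(self, dim, eps=1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return x * rms * self.weight

print(SimpleRMSNorm(8)(torch.randn(2, 3, 8)).shape)  # torch.Size([2, 3, 8])
```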
model-00002-of-00008.safetensors → pytorch_model-00001-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:63384af60dff14f3655142543b97372e144a0e3c238579984a9723ad9a4e676d
+ oid sha256:33296fd065504ad10d93f4fbe8635f33e81885c0c2ac0396004f82770dd781ee
- size 2023960808
+ size 1964070447
model-00003-of-00008.safetensors → pytorch_model-00002-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2d011ce1557d43fdbc8d57fa4bbc7f1b7209155060984461c3513a6b9fbfcbc
+ oid sha256:2745ae50fffd4194e643f965183a29a9abc7595e8ee358d6311f0c81d5ab3fe0
- size 2023960816
+ size 2023970015
model-00004-of-00008.safetensors → pytorch_model-00003-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5c374149caf74e88d228ba50bfcbfcf11004e4b16bb17c830864baa29c5ddc02
+ oid sha256:ee11ea92b22d04b18fdf2bcedb8a4189db41b0e9359332444bdb406cca7abcfe
- size 2023960848
+ size 2023970079
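The RENAMED entries above replace safetensors shards with pickle-based `pytorch_model-*.bin` shards; the changed oid and size on each pointer show the payload was re-serialized, not merely renamed. Loading the two formats differs in one line; a sketch, assuming the shard files sit in the working directory:

```python
import torch
# from safetensors.torch import load_file  # how the old .safetensors shards load

# old: state = load_file("model-00002-of-00008.safetensors")
state = torch.load("pytorch_model-00001-of-00008.bin", map_location="cpu")
print(sum(t.numel() for t in state.values()), "parameters in this shard")
```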
pytorch_model-00004-of-00008.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d6d68e5056982035547049e14d7df29e67c8b813bcf8faa520484022893ea3a
+ size 2023970079
pytorch_model-00005-of-00008.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc53af5f365b1a364cf207d3c42e14e654889daf079e382332f055b715488794
+ size 2023970079
pytorch_model-00006-of-00008.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:800f774aa1839e08f6d25da50e020dc6264539bbbdf691665409d92ae70656c8
+ size 2023970079
pytorch_model-00007-of-00008.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99940cb4d10844d342f8f061d68fb317fd9e45d2a8903ab8aa5e3191a3a320d8
+ size 2023970079
pytorch_model-00008-of-00008.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05be1d470d28513846fcbf0ccd75224bb4bf5b15ec3162305fddece503b0bcf8
+ size 1334846988
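Each pointer file records only the object id (a SHA-256 of the payload) and its byte size; Git LFS fetches the real blob at checkout. That makes integrity checks on a downloaded shard straightforward; a sketch using the last shard's pointer values from above:

```python
import hashlib
import os


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    # stream in 1 MiB chunks so multi-GB shards never sit in memory at once
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size


ok = verify_lfs_object(
    "pytorch_model-00008-of-00008.bin",
    "05be1d470d28513846fcbf0ccd75224bb4bf5b15ec3162305fddece503b0bcf8",
    1334846988,
)
```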
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,266 @@
+ {
+   "metadata": {
+     "total_size": 15442649088
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00008-of-00008.bin",
+     "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.mlp.w1.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.0.mlp.w2.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.1.mlp.w1.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.1.mlp.w2.weight": "pytorch_model-00001-of-00008.bin",
+     "transformer.h.10.attn.c_attn.bias": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.attn.c_attn.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.attn.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.ln_1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.ln_2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.mlp.w1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.10.mlp.w2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.attn.c_attn.bias": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.attn.c_attn.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.attn.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.ln_1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.ln_2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.11.mlp.w1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.11.mlp.w2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.12.attn.c_attn.bias": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.attn.c_attn.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.attn.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.ln_1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.ln_2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.mlp.w1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.12.mlp.w2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.attn.c_attn.bias": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.attn.c_attn.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.attn.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.ln_1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.ln_2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.mlp.w1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.13.mlp.w2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.attn.c_attn.bias": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.attn.c_attn.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.attn.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.ln_1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.ln_2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.mlp.w1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.14.mlp.w2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.attn.c_attn.bias": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.attn.c_attn.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.attn.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.ln_1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.ln_2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.mlp.w1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.15.mlp.w2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.attn.c_attn.bias": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.attn.c_attn.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.attn.c_proj.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.ln_1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.ln_2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.16.mlp.w1.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.16.mlp.w2.weight": "pytorch_model-00004-of-00008.bin",
+     "transformer.h.17.attn.c_attn.bias": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.attn.c_attn.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.attn.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.ln_1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.ln_2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.mlp.w1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.17.mlp.w2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.attn.c_attn.bias": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.attn.c_attn.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.attn.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.ln_1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.ln_2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.mlp.w1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.18.mlp.w2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.attn.c_attn.bias": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.attn.c_attn.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.attn.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.ln_1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.ln_2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.mlp.w1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.19.mlp.w2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.2.attn.c_attn.bias": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.attn.c_attn.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.attn.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.ln_1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.ln_2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.mlp.w1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.2.mlp.w2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.20.attn.c_attn.bias": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.attn.c_attn.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.attn.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.ln_1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.ln_2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.mlp.w1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.20.mlp.w2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.attn.c_attn.bias": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.attn.c_attn.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.attn.c_proj.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.ln_1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.ln_2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.21.mlp.w1.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.21.mlp.w2.weight": "pytorch_model-00005-of-00008.bin",
+     "transformer.h.22.attn.c_attn.bias": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.attn.c_attn.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.attn.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.ln_1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.ln_2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.mlp.w1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.22.mlp.w2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.attn.c_attn.bias": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.attn.c_attn.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.attn.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.ln_1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.ln_2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.mlp.w1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.23.mlp.w2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.attn.c_attn.bias": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.attn.c_attn.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.attn.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.ln_1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.ln_2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.mlp.w1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.24.mlp.w2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.attn.c_attn.bias": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.attn.c_attn.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.attn.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.ln_1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.ln_2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.mlp.w1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.25.mlp.w2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.attn.c_attn.bias": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.attn.c_attn.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.attn.c_proj.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.ln_1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.ln_2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.26.mlp.w1.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.26.mlp.w2.weight": "pytorch_model-00006-of-00008.bin",
+     "transformer.h.27.attn.c_attn.bias": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.attn.c_attn.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.attn.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.ln_1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.ln_2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.mlp.w1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.27.mlp.w2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.attn.c_attn.bias": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.attn.c_attn.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.attn.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.ln_1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.ln_2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.mlp.w1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.28.mlp.w2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.attn.c_attn.bias": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.attn.c_attn.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.attn.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.ln_1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.ln_2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.mlp.w1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.29.mlp.w2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.3.attn.c_attn.bias": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.attn.c_attn.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.attn.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.ln_1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.ln_2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.mlp.w1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.3.mlp.w2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.30.attn.c_attn.bias": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.attn.c_attn.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.attn.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.ln_1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.ln_2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.mlp.w1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.30.mlp.w2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.attn.c_attn.bias": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.attn.c_attn.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.attn.c_proj.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.ln_1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.ln_2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00008-of-00008.bin",
+     "transformer.h.31.mlp.w1.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.31.mlp.w2.weight": "pytorch_model-00007-of-00008.bin",
+     "transformer.h.4.attn.c_attn.bias": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.attn.c_attn.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.attn.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.ln_1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.ln_2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.mlp.w1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.4.mlp.w2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.attn.c_attn.bias": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.attn.c_attn.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.attn.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.ln_1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.ln_2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.mlp.w1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.5.mlp.w2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.attn.c_attn.bias": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.attn.c_attn.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.attn.c_proj.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.ln_1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.ln_2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.6.mlp.w1.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.6.mlp.w2.weight": "pytorch_model-00002-of-00008.bin",
+     "transformer.h.7.attn.c_attn.bias": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.attn.c_attn.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.attn.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.ln_1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.ln_2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.mlp.w1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.7.mlp.w2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.attn.c_attn.bias": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.attn.c_attn.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.attn.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.ln_1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.ln_2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.mlp.w1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.8.mlp.w2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.attn.c_attn.bias": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.attn.c_attn.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.attn.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.ln_1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.ln_2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.mlp.w1.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.h.9.mlp.w2.weight": "pytorch_model-00003-of-00008.bin",
+     "transformer.ln_f.weight": "pytorch_model-00008-of-00008.bin",
+     "transformer.wte.weight": "pytorch_model-00001-of-00008.bin"
+   }
+ }
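This index is what lets `transformers` treat eight shards as one checkpoint: `total_size` is the combined byte count, and `weight_map` sends each parameter name to the shard holding it (note how a block's `mlp.c_proj.weight` occasionally spills into the next shard to balance sizes). A sketch of pulling a single tensor without loading the other seven shards:

```python
import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

name = "transformer.h.31.mlp.c_proj.weight"
shard = index["weight_map"][name]  # -> "pytorch_model-00008-of-00008.bin"
tensor = torch.load(shard, map_location="cpu")[name]
print(f"{name} -> {shard}, shape {tuple(tensor.shape)}")
```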
qwen_generation_utils.py CHANGED
@@ -198,9 +198,8 @@ def _decode_default(
      raw_text_len: int,
      verbose: bool = False,
      return_end_reason: bool = False,
-     errors: str='replace',
  ):
-     trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
+     trim_decode_tokens = tokenizer.decode(tokens)[raw_text_len:]
      if verbose:
          print("\nRaw Generate: ", trim_decode_tokens)

@@ -232,7 +231,6 @@ def _decode_chatml(
      context_length: int,
      verbose: bool = False,
      return_end_reason: bool = False,
-     errors: str='replace'
  ):
      end_reason = f"Gen length {len(tokens)}"
      eod_token_idx = context_length
@@ -241,9 +239,9 @@
              end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
              break

-     trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
+     trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx])[raw_text_len:]
      if verbose:
-         print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
+         print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens)[raw_text_len:])
          print("\nRaw Generate:", trim_decode_tokens)
          print("\nEnd Reason:", end_reason)
      for stop_word in stop_words:
@@ -266,7 +264,6 @@ def decode_tokens(
      chat_format: str,
      verbose: bool = False,
      return_end_reason: bool = False,
-     errors: str="replace",
  ) -> str:
      if torch.is_tensor(tokens):
          tokens = tokens.cpu().numpy().tolist()
@@ -281,7 +278,6 @@
              context_length=context_length,
              verbose=verbose,
              return_end_reason=return_end_reason,
-             errors=errors,
          )
      elif chat_format == "raw":
          return _decode_default(
@@ -292,7 +288,6 @@
              raw_text_len=raw_text_len,
              verbose=verbose,
              return_end_reason=return_end_reason,
-             errors=errors,
          )
      else:
          raise NotImplementedError(f"Unknown chat format {chat_format!r}")
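The whole of this file's diff removes the `errors=` parameter that the decode helpers used to thread through to `tokenizer.decode`, so UTF-8 decode errors are now handled however the tokenizer itself was configured (`errors="replace"` by default). The distinction matters mainly for streaming, where a chunk can end in the middle of a multi-byte character; a standard-library illustration:

```python
# a UTF-8 stream cut in the middle of "é" (encoded as 0xC3 0xA9)
chunk = b"caf\xc3"

print(chunk.decode("utf-8", errors="replace"))  # 'caf\ufffd' -- visible replacement char
print(chunk.decode("utf-8", errors="ignore"))   # 'caf'      -- hidden until the next chunk
```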
tokenization_qwen.py CHANGED
@@ -27,33 +27,20 @@ IMEND = "<|im_end|>"
  # regular texts, the surface forms of special tokens need to be
  # as different as possible to minimize the impact
  EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
- # changed to use actual index to avoid misconfiguration with vocabulary expansion
- SPECIAL_START_ID = 151643
- SPECIAL_TOKENS = tuple(
-     enumerate(
-         (
-             (
-                 ENDOFTEXT,
-                 IMSTART,
-                 IMEND,
-             )
-             + EXTRAS
-         ),
-         start=SPECIAL_START_ID,
-     )
- )
- SPECIAL_TOKENS_SET = set(t for i, t in SPECIAL_TOKENS)
+ SPECIAL_TOKENS = (
+     ENDOFTEXT,
+     IMSTART,
+     IMEND,
+ ) + EXTRAS


  def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
-     with open(tiktoken_bpe_file, "rb") as f:
-         contents = f.read()
+     contents = open(tiktoken_bpe_file, "rb").read()
      return {
          base64.b64decode(token): int(rank)
          for token, rank in (line.split() for line in contents.splitlines() if line)
      }

-
  class QWenTokenizer(PreTrainedTokenizer):
      """QWen tokenizer."""
@@ -63,35 +50,20 @@ class QWenTokenizer(PreTrainedTokenizer):
          self,
          vocab_file,
          errors="replace",
-         extra_vocab_file=None,
          **kwargs,
      ):
          super().__init__(**kwargs)

-         # how to handle errors in decoding UTF-8 byte sequences
-         # use ignore if you are in streaming inference
-         self.errors = errors
+         self.errors = errors  # how to handle errors in decoding

-         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: Dict[bytes, int]
+         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: dict[bytes, int]
          self.special_tokens = {
              token: index
-             for index, token in SPECIAL_TOKENS
+             for index, token in enumerate(
+                 SPECIAL_TOKENS, start=len(self.mergeable_ranks)
+             )
          }

-         # try load extra vocab from file
-         if extra_vocab_file is not None:
-             used_ids = set(self.mergeable_ranks.values()) | set(self.special_tokens.values())
-             extra_mergeable_ranks = _load_tiktoken_bpe(extra_vocab_file)
-             for token, index in extra_mergeable_ranks.items():
-                 if token in self.mergeable_ranks:
-                     logger.info(f"extra token {token} exists, skipping")
-                     continue
-                 if index in used_ids:
-                     logger.info(f'the index {index} for extra token {token} exists, skipping')
-                     continue
-                 self.mergeable_ranks[token] = index
-             # the index may be sparse after this, but don't worry tiktoken.Encoding will handle this
-
          enc = tiktoken.Encoding(
              "Qwen",
              pat_str=PAT_STR,
@@ -113,23 +85,6 @@ class QWenTokenizer(PreTrainedTokenizer):
          self.im_start_id = self.special_tokens[IMSTART]
          self.im_end_id = self.special_tokens[IMEND]

-     def __getstate__(self):
-         # for pickle lovers
-         state = self.__dict__.copy()
-         del state["tokenizer"]
-         return state
-
-     def __setstate__(self, state):
-         # tokenizer is not python native; don't pass it; rebuild it
-         self.__dict__.update(state)
-         enc = tiktoken.Encoding(
-             "Qwen",
-             pat_str=PAT_STR,
-             mergeable_ranks=self.mergeable_ranks,
-             special_tokens=self.special_tokens,
-         )
-         self.tokenizer = enc
-
      def __len__(self) -> int:
          return self.tokenizer.n_vocab
@@ -152,17 +107,13 @@ class QWenTokenizer(PreTrainedTokenizer):
              ids.append(self.mergeable_ranks.get(token))
          return ids

-     def _add_tokens(
-         self,
-         new_tokens: Union[List[str], List[AddedToken]],
-         special_tokens: bool = False,
-     ) -> int:
+     def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
          if not special_tokens and new_tokens:
-             raise ValueError("Adding regular tokens is not supported")
+             raise ValueError('Adding regular tokens is not supported')
          for token in new_tokens:
              surface_form = token.content if isinstance(token, AddedToken) else token
-             if surface_form not in SPECIAL_TOKENS_SET:
-                 raise ValueError("Adding unknown special tokens is not supported")
+             if surface_form not in SPECIAL_TOKENS:
+                 raise ValueError('Adding unknown special tokens is not supported')
          return 0

      def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
@@ -266,11 +217,10 @@ class QWenTokenizer(PreTrainedTokenizer):
          self,
          token_ids: Union[int, List[int]],
          skip_special_tokens: bool = False,
-         errors: str = None,
          **kwargs,
      ) -> str:
          if isinstance(token_ids, int):
              token_ids = [token_ids]
          if skip_special_tokens:
              token_ids = [i for i in token_ids if i < self.eod_id]
-         return self.tokenizer.decode(token_ids, errors=errors or self.errors)
+         return self.tokenizer.decode(token_ids, errors=self.errors)
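The most consequential change in this file is how special-token ids are assigned. The removed side pins them to an explicit `SPECIAL_START_ID = 151643` (its own comment says this avoids misconfiguration under vocabulary expansion), while the added side derives them from `len(self.mergeable_ranks)`, which agrees only while the BPE table has exactly 151,643 entries. A toy demonstration of the drift (the stand-in vocabulary is ours):

```python
ENDOFTEXT, IMSTART, IMEND = "<|endoftext|>", "<|im_start|>", "<|im_end|>"
SPECIAL_TOKENS = (ENDOFTEXT, IMSTART, IMEND)

mergeable_ranks = {f"tok{i}".encode(): i for i in range(151643)}  # stand-in BPE table

length_based = {t: i for i, t in enumerate(SPECIAL_TOKENS, start=len(mergeable_ranks))}
pinned = {t: i for i, t in enumerate(SPECIAL_TOKENS, start=151643)}
assert length_based == pinned  # identical while the vocab is untouched

mergeable_ranks[b"<extra merge>"] = 151643 + len(SPECIAL_TOKENS)  # vocab expansion
shifted = {t: i for i, t in enumerate(SPECIAL_TOKENS, start=len(mergeable_ranks))}
assert shifted != pinned  # length-based ids silently moved; pinned ids did not
```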
tokenizer_config.json CHANGED
@@ -1,5 +1,5 @@
  {
-     "model_max_length": 32768,
+     "model_max_length": 8192,
      "tokenizer_class": "QWenTokenizer",
      "auto_map": {
          "AutoTokenizer": [