.gitattributes CHANGED
@@ -43,11 +43,3 @@ pytorch_model-00001-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model-00002-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model-00005-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model.bin.index.json filter=lfs diff=lfs merge=lfs -text
-model-00001-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00002-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00003-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00004-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00005-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00006-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00007-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00008-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text

README.md CHANGED
@@ -20,15 +20,10 @@ license: other
 
 [![evaluation](https://github.com/InternLM/InternLM/assets/22529082/f80a2a58-5ddf-471a-8da4-32ab65c8fd3b)](https://github.com/internLM/OpenCompass/)
 
-[💻Github Repo](https://github.com/InternLM/InternLM) • [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new) • [📜Technical Report](https://arxiv.org/abs/2403.17297)
+[💻Github Repo](https://github.com/InternLM/InternLM) • [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new)
 
 </div>
 
-<p align="center">
-    👋 join us on <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://github.com/InternLM/InternLM/assets/25839884/a6aad896-7232-4220-ac84-9e070c2633ce" target="_blank">WeChat</a>
-</p>
-
-
 
 ## Introduction
 
@@ -99,92 +94,10 @@ for response, history in model.stream_chat(tokenizer, "Hello", history=[]):
     length = len(response)
 ```
 
-## Deployment
-
-### LMDeploy
-
-LMDeploy is a toolkit for compressing, deploying, and serving LLM, developed by the MMRazor and MMDeploy teams.
-
-```bash
-pip install lmdeploy
-```
-
-You can run batch inference locally with the following python code:
-
-```python
-import lmdeploy
-pipe = lmdeploy.pipeline("internlm/internlm2-chat-7b")
-response = pipe(["Hi, pls intro yourself", "Shanghai is"])
-print(response)
-```
-
-Or you can launch an OpenAI compatible server with the following command:
-
-```bash
-lmdeploy serve api_server internlm/internlm2-chat-7b --model-name internlm2-chat-7b --server-port 23333
-```
-
-Then you can send a chat request to the server:
-
-```bash
-curl http://localhost:23333/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Introduce deep learning to me."}
-    ]
-    }'
-```
-
-Find more details in the [LMDeploy documentation](https://lmdeploy.readthedocs.io/en/latest/)
-
-### vLLM
-
-Launch OpenAI compatible server with `vLLM>=0.3.2`:
-
-```bash
-pip install vllm
-```
-
-```bash
-python -m vllm.entrypoints.openai.api_server --model internlm/internlm2-chat-7b --served-model-name internlm2-chat-7b --trust-remote-code
-```
-
-Then you can send a chat request to the server:
-
-```bash
-curl http://localhost:8000/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Introduce deep learning to me."}
-    ]
-    }'
-```
-
-Find more details in the [vLLM documentation](https://docs.vllm.ai/en/latest/index.html)
-
 ## Open Source License
 
 The code is licensed under Apache-2.0, while model weights are fully open for academic research and also allow **free** commercial usage. To apply for a commercial license, please fill in the [application form (English)](https://wj.qq.com/s2/12727483/5dba/)/[申请表(中文)](https://wj.qq.com/s2/12725412/f7c1/). For other questions or collaborations, please contact <internlm@pjlab.org.cn>.
 
-## Citation
-
-```
-@misc{cai2024internlm2,
-    title={InternLM2 Technical Report},
- author={Zheng Cai and Maosong Cao and Haojiong Chen and Kai Chen and Keyu Chen and Xin Chen and Xun Chen and Zehui Chen and Zhi Chen and Pei Chu and Xiaoyi Dong and Haodong Duan and Qi Fan and Zhaoye Fei and Yang Gao and Jiaye Ge and Chenya Gu and Yuzhe Gu and Tao Gui and Aijia Guo and Qipeng Guo and Conghui He and Yingfan Hu and Ting Huang and Tao Jiang and Penglong Jiao and Zhenjiang Jin and Zhikai Lei and Jiaxing Li and Jingwen Li and Linyang Li and Shuaibin Li and Wei Li and Yining Li and Hongwei Liu and Jiangning Liu and Jiawei Hong and Kaiwen Liu and Kuikun Liu and Xiaoran Liu and Chengqi Lv and Haijun Lv and Kai Lv and Li Ma and Runyuan Ma and Zerun Ma and Wenchang Ning and Linke Ouyang and Jiantao Qiu and Yuan Qu and Fukai Shang and Yunfan Shao and Demin Song and Zifan Song and Zhihao Sui and Peng Sun and Yu Sun and Huanze Tang and Bin Wang and Guoteng Wang and Jiaqi Wang and Jiayu Wang and Rui Wang and Yudong Wang and Ziyi Wang and Xingjian Wei and Qizhen Weng and Fan Wu and Yingtong Xiong and Chao Xu and Ruiliang Xu and Hang Yan and Yirong Yan and Xiaogui Yang and Haochen Ye and Huaiyuan Ying and Jia Yu and Jing Yu and Yuhang Zang and Chuyu Zhang and Li Zhang and Pan Zhang and Peng Zhang and Ruijie Zhang and Shuo Zhang and Songyang Zhang and Wenjian Zhang and Wenwei Zhang and Xingcheng Zhang and Xinyue Zhang and Hui Zhao and Qian Zhao and Xiaomeng Zhao and Fengzhe Zhou and Zaida Zhou and Jingming Zhuo and Yicheng Zou and Xipeng Qiu and Yu Qiao and Dahua Lin},
-    year={2024},
-    eprint={2403.17297},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-```
-
 ## 简介
 
 InternLM2 ,即书生·浦语大模型第二代,开源了面向实用场景的70亿参数基础模型与对话模型 (InternLM2-Chat-7B)。模型具有以下特点:
@@ -250,88 +163,6 @@ for response, history in model.stream_chat(tokenizer, "你好", history=[]):
     length = len(response)
 ```
 
-## 部署
-
-### LMDeploy
-
-LMDeploy 由 MMDeploy 和 MMRazor 团队联合开发,是涵盖了 LLM 任务的全套轻量化、部署和服务解决方案。
-
-```bash
-pip install lmdeploy
-```
-
-你可以使用以下 python 代码进行本地批量推理:
-
-```python
-import lmdeploy
-pipe = lmdeploy.pipeline("internlm/internlm2-chat-7b")
-response = pipe(["Hi, pls intro yourself", "Shanghai is"])
-print(response)
-```
-
-或者你可以使用以下命令启动兼容 OpenAI API 的服务:
-
-```bash
-lmdeploy serve api_server internlm/internlm2-chat-7b --server-port 23333
-```
-
-然后你可以向服务端发起一个聊天请求:
-
-```bash
-curl http://localhost:23333/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "你是个友善的AI助手。"},
-    {"role": "user", "content": "介绍一下深度学习。"}
-    ]
-    }'
-```
-
-更多信息请查看 [LMDeploy 文档](https://lmdeploy.readthedocs.io/en/latest/)
-
-### vLLM
-
-使用`vLLM>=0.3.2`启动兼容 OpenAI API 的服务:
-
-```bash
-pip install vllm
-```
-
-```bash
-python -m vllm.entrypoints.openai.api_server --model internlm/internlm2-chat-7b --trust-remote-code
-```
-
-然后你可以向服务端发起一个聊天请求:
-
-```bash
-curl http://localhost:8000/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "你是个友善的AI助手。"},
-    {"role": "user", "content": "介绍一下深度学习。"}
-    ]
-    }'
-```
-
-更多信息请查看 [vLLM 文档](https://docs.vllm.ai/en/latest/index.html)
-
 ## 开源许可证
 
-本仓库的代码依照 Apache-2.0 协议开源。模型权重对学术研究完全开放,也可申请免费的商业使用授权([申请表](https://wj.qq.com/s2/12725412/f7c1/))。其他问题与合作请联系 <internlm@pjlab.org.cn>。
+本仓库的代码依照 Apache-2.0 协议开源。模型权重对学术研究完全开放,也可申请免费的商业使用授权([申请表](https://wj.qq.com/s2/12725412/f7c1/))。其他问题与合作请联系 <internlm@pjlab.org.cn>。
-
-## 引用
-
-```
-@misc{cai2024internlm2,
-    title={InternLM2 Technical Report},
- author={Zheng Cai and Maosong Cao and Haojiong Chen and Kai Chen and Keyu Chen and Xin Chen and Xun Chen and Zehui Chen and Zhi Chen and Pei Chu and Xiaoyi Dong and Haodong Duan and Qi Fan and Zhaoye Fei and Yang Gao and Jiaye Ge and Chenya Gu and Yuzhe Gu and Tao Gui and Aijia Guo and Qipeng Guo and Conghui He and Yingfan Hu and Ting Huang and Tao Jiang and Penglong Jiao and Zhenjiang Jin and Zhikai Lei and Jiaxing Li and Jingwen Li and Linyang Li and Shuaibin Li and Wei Li and Yining Li and Hongwei Liu and Jiangning Liu and Jiawei Hong and Kaiwen Liu and Kuikun Liu and Xiaoran Liu and Chengqi Lv and Haijun Lv and Kai Lv and Li Ma and Runyuan Ma and Zerun Ma and Wenchang Ning and Linke Ouyang and Jiantao Qiu and Yuan Qu and Fukai Shang and Yunfan Shao and Demin Song and Zifan Song and Zhihao Sui and Peng Sun and Yu Sun and Huanze Tang and Bin Wang and Guoteng Wang and Jiaqi Wang and Jiayu Wang and Rui Wang and Yudong Wang and Ziyi Wang and Xingjian Wei and Qizhen Weng and Fan Wu and Yingtong Xiong and Chao Xu and Ruiliang Xu and Hang Yan and Yirong Yan and Xiaogui Yang and Haochen Ye and Huaiyuan Ying and Jia Yu and Jing Yu and Yuhang Zang and Chuyu Zhang and Li Zhang and Pan Zhang and Peng Zhang and Ruijie Zhang and Shuo Zhang and Songyang Zhang and Wenjian Zhang and Wenwei Zhang and Xingcheng Zhang and Xinyue Zhang and Hui Zhao and Qian Zhao and Xiaomeng Zhao and Fengzhe Zhou and Zaida Zhou and Jingming Zhuo and Yicheng Zou and Xipeng Qiu and Yu Qiao and Dahua Lin},
-    year={2024},
-    eprint={2403.17297},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
- ```
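
The deployment sections removed above exercise an OpenAI-compatible REST endpoint via `curl`. For reference, the same request can be issued from Python; this is a hedged sketch that assumes the LMDeploy server from the removed example is running locally on port 23333 (swap the port for the vLLM example):

```python
# Sketch only: mirrors the removed curl examples against an OpenAI-compatible server.
# The base_url, port, and model name come from the removed README snippets; adjust as needed.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:23333/v1", api_key="none")  # many local servers ignore the key
resp = client.chat.completions.create(
    model="internlm2-chat-7b",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Introduce deep learning to me."},
    ],
)
print(resp.choices[0].message.content)
```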
 
config.json CHANGED
@@ -2,9 +2,8 @@
   "architectures": [
     "InternLM2ForCausalLM"
   ],
-  "attn_implementation": "eager",
   "auto_map": {
-    "AutoConfig": "configuration_internlm2.InternLM2Config",
+    "AutoConfig": "configuration_internlm.InternLMConfig",
     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM"
   },
@@ -16,21 +15,20 @@
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 32768,
-  "model_type": "internlm2",
+  "model_type": "internlm",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "pad_token_id": 2,
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
-    "type": "dynamic",
-    "factor": 2.0
+    "factor": 1.0,
+    "type": "dynamic"
   },
   "rope_theta": 1000000,
   "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.37.1",
+  "torch_dtype": "float16",
+  "transformers_version": "4.33.2",
   "use_cache": true,
-  "vocab_size": 92544,
-  "pretraining_tp": 1
+  "vocab_size": 92544
  }
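
Because `auto_map` in this config points at the repo-local `configuration_internlm.InternLMConfig` and `modeling_internlm2.InternLM2ForCausalLM`, the checkpoint only loads through `trust_remote_code`. A minimal loading sketch (the repo id is illustrative):

```python
# Minimal sketch of loading a checkpoint that resolves its classes via auto_map.
# trust_remote_code=True makes transformers import the configuration_internlm.py /
# modeling_internlm2.py files shipped with the repo instead of built-in classes.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "internlm/internlm2-chat-7b"  # illustrative repo id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.float16,  # matches the "torch_dtype": "float16" entry above
    trust_remote_code=True,
).eval()
```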
configuration_internlm2.py → configuration_internlm.py RENAMED
@@ -1,7 +1,10 @@
 # coding=utf-8
-# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+# Copyright (c) InternLM. All rights reserved.
 #
-# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,22 +17,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" InternLM2 model configuration"""
+""" InternLM model configuration"""
 
 from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging
 
 logger = logging.get_logger(__name__)
 
-INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
 
 
-# Modified from transformers.model.llama.configuration_llama.LlamaConfig
-class InternLM2Config(PretrainedConfig):
+class InternLMConfig(PretrainedConfig):
     r"""
-    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
-    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
-    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.
+    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+    an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
 
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
@@ -37,16 +39,16 @@ class InternLM2Config(PretrainedConfig):
 
     Args:
         vocab_size (`int`, *optional*, defaults to 32000):
-            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
-            `inputs_ids` passed when calling [`InternLM2Model`]
+            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`InternLMModel`]
         hidden_size (`int`, *optional*, defaults to 4096):
             Dimension of the hidden representations.
         intermediate_size (`int`, *optional*, defaults to 11008):
             Dimension of the MLP representations.
         num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer decoder.
+            Number of hidden layers in the Transformer encoder.
         num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the Transformer decoder.
+            Number of attention heads for each attention layer in the Transformer encoder.
         num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
@@ -58,42 +60,33 @@ class InternLM2Config(PretrainedConfig):
         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
             The non-linear activation function (function or string) in the decoder.
         max_position_embeddings (`int`, *optional*, defaults to 2048):
-            The maximum sequence length that this model might ever be used with. InternLM2 supports up to 32768 tokens.
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
         initializer_range (`float`, *optional*, defaults to 0.02):
             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+        rms_norm_eps (`float`, *optional*, defaults to 1e-12):
             The epsilon used by the rms normalization layers.
         use_cache (`bool`, *optional*, defaults to `True`):
             Whether or not the model should return the last key/values attentions (not used by all models). Only
             relevant if `config.is_decoder=True`.
-        pad_token_id (`int`, *optional*):
-            Padding token id.
-        bos_token_id (`int`, *optional*, defaults to 1):
-            Beginning of stream token id.
-        eos_token_id (`int`, *optional*, defaults to 2):
-            End of stream token id.
-        pretraining_tp (`int`, *optional*, defaults to 1):
-            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
-            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism)
-            to understand more about it. This value is necessary to ensure exact reproducibility
-            of the pretraining results. Please refer to [this
-            issue](https://github.com/pytorch/pytorch/issues/76232).
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
             Whether to tie weight embeddings
-        rope_theta (`float`, *optional*, defaults to 10000.0):
-            The base period of the RoPE embeddings.
-        rope_scaling (`Dict`, *optional*):
-            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
-            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
-            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
-            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
-            these scaling strategies behave:
-            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
-            experimental feature, subject to breaking API changes in future versions.
-    """
+    Example:
+
+    ```python
+    >>> from transformers import InternLMModel, InternLMConfig
+
+    >>> # Initializing a InternLM internlm-7b style configuration
+    >>> configuration = InternLMConfig()
+
+    >>> # Initializing a model from the internlm-7b style configuration
+    >>> model = InternLMModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "internlm"
     _auto_class = "AutoConfig"
-    model_type = "internlm2"
-    keys_to_ignore_at_inference = ["past_key_values"]
 
     def __init__(  # pylint: disable=W0102
         self,
@@ -111,12 +104,11 @@ class InternLM2Config(PretrainedConfig):
         pad_token_id=0,
         bos_token_id=1,
         eos_token_id=2,
-        pretraining_tp=1,
         tie_word_embeddings=False,
         bias=True,
         rope_theta=10000,
         rope_scaling=None,
-        attn_implementation=None,
+        attn_implementation="eager",
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -134,15 +126,14 @@ class InternLM2Config(PretrainedConfig):
         self.hidden_act = hidden_act
         self.initializer_range = initializer_range
         self.rms_norm_eps = rms_norm_eps
-        self.pretraining_tp = pretraining_tp
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
         self._rope_scaling_validation()
+
         self.attn_implementation = attn_implementation
         if self.attn_implementation is None:
             self.attn_implementation = "eager"
-
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
@@ -169,12 +160,5 @@ class InternLM2Config(PretrainedConfig):
             raise ValueError(
                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
             )
-        if (
-            rope_scaling_factor is None
-            or not isinstance(rope_scaling_factor, (float, int))
-            or rope_scaling_factor < 1.0
-        ):
-            raise ValueError(
-                f"`rope_scaling`'s factor field must be a number >= 1, got {rope_scaling_factor} "
-                f"of type {type(rope_scaling_factor)}"
-            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
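
One behavioral note on the tightened check above: `isinstance(rope_scaling_factor, float)` no longer accepts Python ints, so the factor must be written as a float literal. A small illustration (values are examples; the first dict mirrors the `rope_scaling` entry in config.json):

```python
# Illustration of the stricter factor validation above (values are examples).
valid_rope_scaling = {"type": "dynamic", "factor": 1.0}   # float >= 1.0 -> accepted
invalid_rope_scaling = {"type": "dynamic", "factor": 2}   # plain int -> rejected by isinstance(..., float)
```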
 
 
 
 
 
 
 
generation_config.json CHANGED
@@ -1,9 +1,7 @@
 {
+  "_from_model_config": true,
   "bos_token_id": 1,
-  "eos_token_id": [
-    2,
-    92542
-  ],
+  "eos_token_id": 2,
   "pad_token_id": 2,
-  "transformers_version": "4.37.1"
+  "transformers_version": "4.33.2"
  }
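
With the reverted `generation_config.json`, only token id 2 terminates generation by default. If the chat end-of-turn token (id 92542 in the removed variant) should also stop decoding, it can be supplied per call; a hedged sketch, continuing from the loading example above:

```python
# Sketch only: ids are taken from the two generation_config.json variants above;
# verify them against the tokenizer before relying on them. generate() accepts a list of eos ids.
inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64, eos_token_id=[2, 92542])
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```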
model-00005-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4a28f7db7ba9fc0991e3552a156e11b942494e945c4c134003aad40bb4e49ed6
-size 1979780456

model-00006-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d330d795037a2db1c7a15a991c88ba6bf2bc2890e36210571d7499d1ae6dfd51
-size 1946242728

model-00007-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c6c65720f77de3aec168dff5b0f11e779f4c9059f2fdb04c67bae4321f5efd6a
-size 1979780456

model-00008-of-00008.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb488551289c4630b7b1fb015acf2ff52a206383c96370a7664b42253a007eae
-size 1748035640

model.safetensors.index.json DELETED
@@ -1,234 +0,0 @@
1
- {
2
- "metadata": {
3
- "total_size": 15475417088
4
- },
5
- "weight_map": {
6
- "model.layers.0.attention.wo.weight": "model-00001-of-00008.safetensors",
7
- "model.layers.0.attention.wqkv.weight": "model-00001-of-00008.safetensors",
8
- "model.layers.0.attention_norm.weight": "model-00001-of-00008.safetensors",
9
- "model.layers.0.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
10
- "model.layers.0.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
11
- "model.layers.0.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
12
- "model.layers.0.ffn_norm.weight": "model-00001-of-00008.safetensors",
13
- "model.layers.1.attention.wo.weight": "model-00001-of-00008.safetensors",
14
- "model.layers.1.attention.wqkv.weight": "model-00001-of-00008.safetensors",
15
- "model.layers.1.attention_norm.weight": "model-00001-of-00008.safetensors",
16
- "model.layers.1.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
17
- "model.layers.1.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
18
- "model.layers.1.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
19
- "model.layers.1.ffn_norm.weight": "model-00001-of-00008.safetensors",
20
- "model.layers.10.attention.wo.weight": "model-00003-of-00008.safetensors",
21
- "model.layers.10.attention.wqkv.weight": "model-00003-of-00008.safetensors",
22
- "model.layers.10.attention_norm.weight": "model-00003-of-00008.safetensors",
23
- "model.layers.10.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
24
- "model.layers.10.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
25
- "model.layers.10.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
26
- "model.layers.10.ffn_norm.weight": "model-00003-of-00008.safetensors",
27
- "model.layers.11.attention.wo.weight": "model-00003-of-00008.safetensors",
28
- "model.layers.11.attention.wqkv.weight": "model-00003-of-00008.safetensors",
29
- "model.layers.11.attention_norm.weight": "model-00004-of-00008.safetensors",
30
- "model.layers.11.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
31
- "model.layers.11.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
32
- "model.layers.11.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
33
- "model.layers.11.ffn_norm.weight": "model-00004-of-00008.safetensors",
34
- "model.layers.12.attention.wo.weight": "model-00004-of-00008.safetensors",
35
- "model.layers.12.attention.wqkv.weight": "model-00004-of-00008.safetensors",
36
- "model.layers.12.attention_norm.weight": "model-00004-of-00008.safetensors",
37
- "model.layers.12.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
38
- "model.layers.12.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
39
- "model.layers.12.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
40
- "model.layers.12.ffn_norm.weight": "model-00004-of-00008.safetensors",
41
- "model.layers.13.attention.wo.weight": "model-00004-of-00008.safetensors",
42
- "model.layers.13.attention.wqkv.weight": "model-00004-of-00008.safetensors",
43
- "model.layers.13.attention_norm.weight": "model-00004-of-00008.safetensors",
44
- "model.layers.13.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
45
- "model.layers.13.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
46
- "model.layers.13.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
47
- "model.layers.13.ffn_norm.weight": "model-00004-of-00008.safetensors",
48
- "model.layers.14.attention.wo.weight": "model-00004-of-00008.safetensors",
49
- "model.layers.14.attention.wqkv.weight": "model-00004-of-00008.safetensors",
50
- "model.layers.14.attention_norm.weight": "model-00004-of-00008.safetensors",
51
- "model.layers.14.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
52
- "model.layers.14.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
53
- "model.layers.14.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
54
- "model.layers.14.ffn_norm.weight": "model-00004-of-00008.safetensors",
55
- "model.layers.15.attention.wo.weight": "model-00004-of-00008.safetensors",
56
- "model.layers.15.attention.wqkv.weight": "model-00004-of-00008.safetensors",
57
- "model.layers.15.attention_norm.weight": "model-00004-of-00008.safetensors",
58
- "model.layers.15.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
59
- "model.layers.15.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
60
- "model.layers.15.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
61
- "model.layers.15.ffn_norm.weight": "model-00004-of-00008.safetensors",
62
- "model.layers.16.attention.wo.weight": "model-00004-of-00008.safetensors",
63
- "model.layers.16.attention.wqkv.weight": "model-00004-of-00008.safetensors",
64
- "model.layers.16.attention_norm.weight": "model-00005-of-00008.safetensors",
65
- "model.layers.16.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
66
- "model.layers.16.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
67
- "model.layers.16.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
68
- "model.layers.16.ffn_norm.weight": "model-00005-of-00008.safetensors",
69
- "model.layers.17.attention.wo.weight": "model-00005-of-00008.safetensors",
70
- "model.layers.17.attention.wqkv.weight": "model-00005-of-00008.safetensors",
71
- "model.layers.17.attention_norm.weight": "model-00005-of-00008.safetensors",
72
- "model.layers.17.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
73
- "model.layers.17.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
74
- "model.layers.17.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
75
- "model.layers.17.ffn_norm.weight": "model-00005-of-00008.safetensors",
76
- "model.layers.18.attention.wo.weight": "model-00005-of-00008.safetensors",
77
- "model.layers.18.attention.wqkv.weight": "model-00005-of-00008.safetensors",
78
- "model.layers.18.attention_norm.weight": "model-00005-of-00008.safetensors",
79
- "model.layers.18.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
80
- "model.layers.18.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
81
- "model.layers.18.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
82
- "model.layers.18.ffn_norm.weight": "model-00005-of-00008.safetensors",
83
- "model.layers.19.attention.wo.weight": "model-00005-of-00008.safetensors",
84
- "model.layers.19.attention.wqkv.weight": "model-00005-of-00008.safetensors",
85
- "model.layers.19.attention_norm.weight": "model-00005-of-00008.safetensors",
86
- "model.layers.19.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
87
- "model.layers.19.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
88
- "model.layers.19.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
89
- "model.layers.19.ffn_norm.weight": "model-00005-of-00008.safetensors",
90
- "model.layers.2.attention.wo.weight": "model-00001-of-00008.safetensors",
91
- "model.layers.2.attention.wqkv.weight": "model-00001-of-00008.safetensors",
92
- "model.layers.2.attention_norm.weight": "model-00002-of-00008.safetensors",
93
- "model.layers.2.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
94
- "model.layers.2.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
95
- "model.layers.2.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
96
- "model.layers.2.ffn_norm.weight": "model-00002-of-00008.safetensors",
97
- "model.layers.20.attention.wo.weight": "model-00005-of-00008.safetensors",
98
- "model.layers.20.attention.wqkv.weight": "model-00005-of-00008.safetensors",
99
- "model.layers.20.attention_norm.weight": "model-00006-of-00008.safetensors",
100
- "model.layers.20.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
101
- "model.layers.20.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
102
- "model.layers.20.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
103
- "model.layers.20.ffn_norm.weight": "model-00006-of-00008.safetensors",
104
- "model.layers.21.attention.wo.weight": "model-00006-of-00008.safetensors",
105
- "model.layers.21.attention.wqkv.weight": "model-00006-of-00008.safetensors",
106
- "model.layers.21.attention_norm.weight": "model-00006-of-00008.safetensors",
107
- "model.layers.21.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
108
- "model.layers.21.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
109
- "model.layers.21.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
110
- "model.layers.21.ffn_norm.weight": "model-00006-of-00008.safetensors",
111
- "model.layers.22.attention.wo.weight": "model-00006-of-00008.safetensors",
112
- "model.layers.22.attention.wqkv.weight": "model-00006-of-00008.safetensors",
113
- "model.layers.22.attention_norm.weight": "model-00006-of-00008.safetensors",
114
- "model.layers.22.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
115
- "model.layers.22.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
116
- "model.layers.22.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
117
- "model.layers.22.ffn_norm.weight": "model-00006-of-00008.safetensors",
118
- "model.layers.23.attention.wo.weight": "model-00006-of-00008.safetensors",
119
- "model.layers.23.attention.wqkv.weight": "model-00006-of-00008.safetensors",
120
- "model.layers.23.attention_norm.weight": "model-00006-of-00008.safetensors",
121
- "model.layers.23.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
122
- "model.layers.23.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
123
- "model.layers.23.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
124
- "model.layers.23.ffn_norm.weight": "model-00006-of-00008.safetensors",
125
- "model.layers.24.attention.wo.weight": "model-00006-of-00008.safetensors",
126
- "model.layers.24.attention.wqkv.weight": "model-00006-of-00008.safetensors",
127
- "model.layers.24.attention_norm.weight": "model-00006-of-00008.safetensors",
128
- "model.layers.24.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
129
- "model.layers.24.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
130
- "model.layers.24.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
131
- "model.layers.24.ffn_norm.weight": "model-00006-of-00008.safetensors",
132
- "model.layers.25.attention.wo.weight": "model-00006-of-00008.safetensors",
133
- "model.layers.25.attention.wqkv.weight": "model-00006-of-00008.safetensors",
134
- "model.layers.25.attention_norm.weight": "model-00007-of-00008.safetensors",
135
- "model.layers.25.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
136
- "model.layers.25.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
137
- "model.layers.25.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
138
- "model.layers.25.ffn_norm.weight": "model-00007-of-00008.safetensors",
139
- "model.layers.26.attention.wo.weight": "model-00007-of-00008.safetensors",
140
- "model.layers.26.attention.wqkv.weight": "model-00007-of-00008.safetensors",
141
- "model.layers.26.attention_norm.weight": "model-00007-of-00008.safetensors",
142
- "model.layers.26.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
143
- "model.layers.26.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
144
- "model.layers.26.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
145
- "model.layers.26.ffn_norm.weight": "model-00007-of-00008.safetensors",
146
- "model.layers.27.attention.wo.weight": "model-00007-of-00008.safetensors",
147
- "model.layers.27.attention.wqkv.weight": "model-00007-of-00008.safetensors",
148
- "model.layers.27.attention_norm.weight": "model-00007-of-00008.safetensors",
149
- "model.layers.27.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
150
- "model.layers.27.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
151
- "model.layers.27.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
152
- "model.layers.27.ffn_norm.weight": "model-00007-of-00008.safetensors",
153
- "model.layers.28.attention.wo.weight": "model-00007-of-00008.safetensors",
154
- "model.layers.28.attention.wqkv.weight": "model-00007-of-00008.safetensors",
155
- "model.layers.28.attention_norm.weight": "model-00007-of-00008.safetensors",
156
- "model.layers.28.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
157
- "model.layers.28.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
158
- "model.layers.28.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
159
- "model.layers.28.ffn_norm.weight": "model-00007-of-00008.safetensors",
160
- "model.layers.29.attention.wo.weight": "model-00007-of-00008.safetensors",
161
- "model.layers.29.attention.wqkv.weight": "model-00007-of-00008.safetensors",
162
- "model.layers.29.attention_norm.weight": "model-00008-of-00008.safetensors",
163
- "model.layers.29.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
164
- "model.layers.29.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
165
- "model.layers.29.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
166
- "model.layers.29.ffn_norm.weight": "model-00008-of-00008.safetensors",
167
- "model.layers.3.attention.wo.weight": "model-00002-of-00008.safetensors",
168
- "model.layers.3.attention.wqkv.weight": "model-00002-of-00008.safetensors",
169
- "model.layers.3.attention_norm.weight": "model-00002-of-00008.safetensors",
170
- "model.layers.3.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
171
- "model.layers.3.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
172
- "model.layers.3.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
173
- "model.layers.3.ffn_norm.weight": "model-00002-of-00008.safetensors",
174
- "model.layers.30.attention.wo.weight": "model-00008-of-00008.safetensors",
175
- "model.layers.30.attention.wqkv.weight": "model-00008-of-00008.safetensors",
176
- "model.layers.30.attention_norm.weight": "model-00008-of-00008.safetensors",
177
- "model.layers.30.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
178
- "model.layers.30.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
179
- "model.layers.30.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
180
- "model.layers.30.ffn_norm.weight": "model-00008-of-00008.safetensors",
181
- "model.layers.31.attention.wo.weight": "model-00008-of-00008.safetensors",
182
- "model.layers.31.attention.wqkv.weight": "model-00008-of-00008.safetensors",
183
- "model.layers.31.attention_norm.weight": "model-00008-of-00008.safetensors",
184
- "model.layers.31.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
185
- "model.layers.31.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
186
- "model.layers.31.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
187
- "model.layers.31.ffn_norm.weight": "model-00008-of-00008.safetensors",
188
- "model.layers.4.attention.wo.weight": "model-00002-of-00008.safetensors",
189
- "model.layers.4.attention.wqkv.weight": "model-00002-of-00008.safetensors",
190
- "model.layers.4.attention_norm.weight": "model-00002-of-00008.safetensors",
191
- "model.layers.4.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
192
- "model.layers.4.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
193
- "model.layers.4.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
194
- "model.layers.4.ffn_norm.weight": "model-00002-of-00008.safetensors",
195
- "model.layers.5.attention.wo.weight": "model-00002-of-00008.safetensors",
196
- "model.layers.5.attention.wqkv.weight": "model-00002-of-00008.safetensors",
197
- "model.layers.5.attention_norm.weight": "model-00002-of-00008.safetensors",
198
- "model.layers.5.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
199
- "model.layers.5.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
200
- "model.layers.5.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
201
- "model.layers.5.ffn_norm.weight": "model-00002-of-00008.safetensors",
202
- "model.layers.6.attention.wo.weight": "model-00002-of-00008.safetensors",
203
- "model.layers.6.attention.wqkv.weight": "model-00002-of-00008.safetensors",
204
- "model.layers.6.attention_norm.weight": "model-00002-of-00008.safetensors",
205
- "model.layers.6.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
206
- "model.layers.6.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
207
- "model.layers.6.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
208
- "model.layers.6.ffn_norm.weight": "model-00002-of-00008.safetensors",
209
- "model.layers.7.attention.wo.weight": "model-00002-of-00008.safetensors",
210
- "model.layers.7.attention.wqkv.weight": "model-00002-of-00008.safetensors",
211
- "model.layers.7.attention_norm.weight": "model-00003-of-00008.safetensors",
212
- "model.layers.7.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
213
- "model.layers.7.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
214
- "model.layers.7.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
215
- "model.layers.7.ffn_norm.weight": "model-00003-of-00008.safetensors",
216
- "model.layers.8.attention.wo.weight": "model-00003-of-00008.safetensors",
217
- "model.layers.8.attention.wqkv.weight": "model-00003-of-00008.safetensors",
218
- "model.layers.8.attention_norm.weight": "model-00003-of-00008.safetensors",
219
- "model.layers.8.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
220
- "model.layers.8.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
221
- "model.layers.8.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
222
- "model.layers.8.ffn_norm.weight": "model-00003-of-00008.safetensors",
223
- "model.layers.9.attention.wo.weight": "model-00003-of-00008.safetensors",
224
- "model.layers.9.attention.wqkv.weight": "model-00003-of-00008.safetensors",
225
- "model.layers.9.attention_norm.weight": "model-00003-of-00008.safetensors",
226
- "model.layers.9.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
227
- "model.layers.9.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
228
- "model.layers.9.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
229
- "model.layers.9.ffn_norm.weight": "model-00003-of-00008.safetensors",
230
- "model.norm.weight": "model-00008-of-00008.safetensors",
231
- "model.tok_embeddings.weight": "model-00001-of-00008.safetensors",
232
- "output.weight": "model-00008-of-00008.safetensors"
233
- }
234
- }

modeling_internlm2.py CHANGED
@@ -13,10 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""PyTorch InternLM2 model."""
 import math
 import queue
 import threading
 from typing import List, Optional, Tuple, Union
 
 import torch
@@ -26,50 +27,49 @@ from einops import rearrange
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 from transformers.activations import ACT2FN
-from transformers.cache_utils import Cache, DynamicCache, StaticCache
-from transformers.modeling_attn_mask_utils import AttentionMaskConverter
 from transformers.modeling_outputs import (
     BaseModelOutputWithPast,
     CausalLMOutputWithPast,
-    QuestionAnsweringModelOutput,
     SequenceClassifierOutputWithPast,
-    TokenClassifierOutput,
 )
 from transformers.modeling_utils import PreTrainedModel
-from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
 from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
-    is_flash_attn_greater_or_equal_2_10,
     logging,
     replace_return_docstrings,
 )
 
 try:
     from transformers.generation.streamers import BaseStreamer
-except Exception:
     BaseStreamer = None
 
-from .configuration_internlm2 import InternLM2Config
-
-
-try:
-    from flash_attn import flash_attn_func, flash_attn_varlen_func
-    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
-except:
-    pass
-
 
 logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "InternLM2Config"
 
-
 def _get_unpad_data(attention_mask):
     seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
     max_seqlen_in_batch = seqlens_in_batch.max().item()
-    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))  # pylint: disable=E1102
     return (
         indices,
         cu_seqlens,
@@ -77,10 +77,45 @@ def _get_unpad_data(attention_mask):
     )
 
 
-class InternLM2RMSNorm(nn.Module):
-    """InternLM2RMSNorm is equivalent to T5LayerNorm."""
 
     def __init__(self, hidden_size, eps=1e-6):
         super().__init__()
         self.weight = nn.Parameter(torch.ones(hidden_size))
         self.variance_epsilon = eps
@@ -93,68 +128,93 @@ class InternLM2RMSNorm(nn.Module):
         return self.weight * hidden_states.to(input_dtype)
 
 
-ALL_LAYERNORM_LAYERS.append(InternLM2RMSNorm)
-
-
 class InternLM2RotaryEmbedding(nn.Module):
-    """Rotary Position Embedding for the InternLM2 model. Credits to the Reddit user /u/lucidrains."""
-
-    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
         super().__init__()
-        self.scaling_factor = scaling_factor
         self.dim = dim
         self.max_position_embeddings = max_position_embeddings
         self.base = base
-        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
         self.register_buffer("inv_freq", inv_freq, persistent=False)
-        # For BC we register cos and sin cached
-        self.max_seq_len_cached = max_position_embeddings
 
-    @torch.no_grad()
-    def forward(self, x, position_ids):
         # x: [bs, num_attention_heads, seq_len, head_size]
-        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
-        position_ids_expanded = position_ids[:, None, :].float()
-        # Force float32 since bfloat16 loses precision on long contexts
-        # See https://github.com/huggingface/transformers/pull/29285
-        device_type = x.device.type
-        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
-        with torch.autocast(device_type=device_type, enabled=False):
-            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
-            emb = torch.cat((freqs, freqs), dim=-1)
-            cos = emb.cos()
-            sin = emb.sin()
-        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
 
 
 class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
     """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
 
-    def forward(self, x, position_ids):
-        # difference to the original RoPE: a scaling factor is aplied to the position ids
-        position_ids = position_ids.float() / self.scaling_factor
-        cos, sin = super().forward(x, position_ids)
-        return cos, sin
 
 
 class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
     """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
-    Credits to the Reddit users /u/bloc97 and /u/emozilla"""
 
-    def forward(self, x, position_ids):
-        # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
-        seq_len = torch.max(position_ids) + 1
         if seq_len > self.max_position_embeddings:
             base = self.base * (
                 (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
             ) ** (self.dim / (self.dim - 2))
-            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim))
-            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: this may break with compilation
 
-        cos, sin = super().forward(x, position_ids)
-        return cos, sin
 
 
 def rotate_half(x):
     """Rotates half the hidden dims of the input."""
     x1 = x[..., : x.shape[-1] // 2]
@@ -162,36 +222,17 @@ def rotate_half(x):
     return torch.cat((-x2, x1), dim=-1)
 
 
-def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):  # pylint: disable=unused-argument
-    """Applies Rotary Position Embedding to the query and key tensors.
-
-    Args:
-        q (`torch.Tensor`): The query tensor.
-        k (`torch.Tensor`): The key tensor.
-        cos (`torch.Tensor`): The cosine part of the rotary embedding.
-        sin (`torch.Tensor`): The sine part of the rotary embedding.
-        position_ids (`torch.Tensor`, *optional*):
-            Deprecated and unused.
-        unsqueeze_dim (`int`, *optional*, defaults to 1):
-            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
-            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
-            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
-            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
-            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
-            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
-    Returns:
-        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
-    """
-    cos = cos.unsqueeze(unsqueeze_dim)
-    sin = sin.unsqueeze(unsqueeze_dim)
     q_embed = (q * cos) + (rotate_half(q) * sin)
     k_embed = (k * cos) + (rotate_half(k) * sin)
     return q_embed, k_embed
 
 
 class InternLM2MLP(nn.Module):
-    """MLP for InternLM2 model."""
-
     def __init__(self, config):
         super().__init__()
         self.config = config
@@ -208,6 +249,7 @@ class InternLM2MLP(nn.Module):
         return down_proj
 
 
 def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
     """
  This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
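
For reference, the `repeat_kv` helper whose docstring is shown above expands each key/value head `n_rep` times for grouped-query attention. A minimal, self-contained check of the equivalence the docstring claims (the body is re-stated here from the standard Llama-style implementation and the shapes are illustrative; the repo's version may differ in detail):

```python
# Equivalence check: expand-and-reshape repeat_kv matches torch.repeat_interleave along dim=1.
import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

x = torch.randn(2, 8, 16, 64)  # (batch, num_kv_heads, seq_len, head_dim); 8 KV heads as in config.json above
assert torch.equal(repeat_kv(x, 4), torch.repeat_interleave(x, repeats=4, dim=1))
```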
@@ -220,27 +262,19 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
220
  return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
221
 
222
 
 
223
  class InternLM2Attention(nn.Module):
224
  """Multi-headed attention from 'Attention Is All You Need' paper"""
225
 
226
- def __init__(self, config: InternLM2Config, layer_idx: Optional[int] = None):
227
  super().__init__()
228
  self.config = config
229
- self.layer_idx = layer_idx
230
- if layer_idx is None:
231
- logger.warning_once(
232
- f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
233
- "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
234
- "when creating this class."
235
- )
236
-
237
  self.hidden_size = config.hidden_size
238
  self.num_heads = config.num_attention_heads
239
  self.head_dim = self.hidden_size // self.num_heads
240
  self.num_key_value_heads = config.num_key_value_heads
241
  self.num_key_value_groups = self.num_heads // self.num_key_value_heads
242
  self.max_position_embeddings = config.max_position_embeddings
243
- self.rope_theta = config.rope_theta
244
  self.is_causal = True
245
 
246
  if (self.head_dim * self.num_heads) != self.hidden_size:
@@ -254,8 +288,8 @@ class InternLM2Attention(nn.Module):
254
  (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
255
  bias=config.bias,
256
  )
257
- self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
258
 
 
259
  self._init_rope()
260
 
261
  def _init_rope(self):
@@ -263,49 +297,51 @@ class InternLM2Attention(nn.Module):
263
  self.rotary_emb = InternLM2RotaryEmbedding(
264
  self.head_dim,
265
  max_position_embeddings=self.max_position_embeddings,
266
- base=self.rope_theta,
267
  )
268
  else:
269
  scaling_type = self.config.rope_scaling["type"]
270
  scaling_factor = self.config.rope_scaling["factor"]
271
- if scaling_type == "linear":
272
- self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
273
  self.head_dim,
274
  max_position_embeddings=self.max_position_embeddings,
 
275
  scaling_factor=scaling_factor,
276
- base=self.rope_theta,
277
  )
278
- elif scaling_type == "dynamic":
279
- self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
280
  self.head_dim,
281
  max_position_embeddings=self.max_position_embeddings,
 
282
  scaling_factor=scaling_factor,
283
- base=self.rope_theta,
284
  )
285
  else:
286
- raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
 
 
 
 
287
 
288
  def forward(
289
  self,
290
  hidden_states: torch.Tensor,
291
  attention_mask: Optional[torch.Tensor] = None,
292
  position_ids: Optional[torch.LongTensor] = None,
293
- past_key_value: Optional[Cache] = None,
294
  output_attentions: bool = False,
295
- use_cache: bool = False, # pylint: disable=unused-argument
296
- cache_position: Optional[torch.LongTensor] = None,
297
  ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
 
 
 
 
 
 
298
  bsz, q_len, _ = hidden_states.size()
299
 
300
- if self.config.pretraining_tp > 1:
301
- # split qkv_states by tp size
302
- key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
303
- qkv_slices = self.wqkv.weight.split(key_value_slicing, dim=0)
304
- qkv_states = torch.cat(
305
- [F.linear(hidden_states, qkv_slice) for qkv_slice in qkv_slices], dim=-1 # pylint: disable=E1102
306
- )
307
- else:
308
- qkv_states = self.wqkv(hidden_states)
309
 
310
  qkv_states = rearrange(
311
  qkv_states,
@@ -315,26 +351,44 @@ class InternLM2Attention(nn.Module):
315
  )
316
 
317
  query_states = qkv_states[..., : self.num_key_value_groups, :]
318
- query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d").transpose(1, 2)
319
- key_states = qkv_states[..., -2, :].transpose(1, 2)
320
- value_states = qkv_states[..., -1, :].transpose(1, 2)
321
 
322
- cos, sin = self.rotary_emb(value_states, position_ids)
 
 
 
 
 
 
 
323
  query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
324
 
325
  if past_key_value is not None:
326
- # sin and cos are specific to RoPE models; cache_position needed for the static cache
327
- cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
328
- key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 
 
329
 
330
  key_states = repeat_kv(key_states, self.num_key_value_groups)
331
  value_states = repeat_kv(value_states, self.num_key_value_groups)
332
 
333
  attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
334
 
335
- if attention_mask is not None: # no matter the length, we just slice it
336
- causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
337
- attn_weights = attn_weights + causal_mask
 
 
 
 
 
 
 
 
 
338
 
339
  # upcast attention to fp32
340
  attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
@@ -347,20 +401,9 @@ class InternLM2Attention(nn.Module):
347
  )
348
 
349
  attn_output = attn_output.transpose(1, 2).contiguous()
350
-
351
  attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
352
 
353
- if self.config.pretraining_tp > 1:
354
- attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
355
- o_proj_slices = self.wo.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
356
- attn_output = sum(
357
- [
358
- F.linear(attn_output[i], o_proj_slices[i]) # pylint: disable=E1102
359
- for i in range(self.config.pretraining_tp)
360
- ]
361
- )
362
- else:
363
- attn_output = self.wo(attn_output)
364
 
365
  if not output_attentions:
366
  attn_weights = None
@@ -368,6 +411,7 @@ class InternLM2Attention(nn.Module):
368
  return attn_output, attn_weights, past_key_value
369
 
370
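The packed `wqkv` projection in the forward pass above stores, for each key/value head, its group of query heads followed by one key head and one value head; the `rearrange` and slicing recover the separate tensors. A minimal sketch with made-up sizes (8 query heads sharing 2 key/value heads; requires the `einops` package this module already depends on):

```python
import torch
from einops import rearrange

bsz, q_len, hidden_size = 1, 3, 32                        # illustrative sizes only
num_heads, num_key_value_heads = 8, 2
head_dim = hidden_size // num_heads                       # 4
num_key_value_groups = num_heads // num_key_value_heads   # 4 query heads per kv head

# The wqkv output packs, per kv head: its query-head group, then one key head, then one value head.
qkv_states = torch.randn(bsz, q_len, (num_heads + 2 * num_key_value_heads) * head_dim)

qkv_states = rearrange(
    qkv_states, "b q (h gs d) -> b q h gs d",
    gs=2 + num_key_value_groups, d=head_dim,
)
query_states = qkv_states[..., :num_key_value_groups, :]            # (b, q, kv_heads, groups, d)
query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
key_states = qkv_states[..., -2, :]                                  # (b, q, kv_heads, d)
value_states = qkv_states[..., -1, :]

assert query_states.shape == (bsz, q_len, num_heads, head_dim)
assert key_states.shape == value_states.shape == (bsz, q_len, num_key_value_heads, head_dim)
```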
 
 
371
  class InternLM2FlashAttention2(InternLM2Attention):
372
  """
373
  InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
@@ -375,34 +419,26 @@ class InternLM2FlashAttention2(InternLM2Attention):
375
  flash attention and deal with padding tokens in case the input contains any of them.
376
  """
377
 
378
- def __init__(self, *args, **kwargs):
379
- super().__init__(*args, **kwargs)
380
-
381
- # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
382
- # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment,
383
- # that was made default for flash_attn>=2.1. This attribute is used to handle this difference.
384
- # Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
385
- # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1)
386
- # produces a wrong mask (top-left).
387
- self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
388
-
389
  def forward(
390
  self,
391
  hidden_states: torch.Tensor,
392
  attention_mask: Optional[torch.LongTensor] = None,
393
  position_ids: Optional[torch.LongTensor] = None,
394
- past_key_value: Optional[Cache] = None,
395
  output_attentions: bool = False,
396
  use_cache: bool = False,
397
- cache_position: Optional[torch.LongTensor] = None,
398
  ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
399
- if isinstance(past_key_value, StaticCache):
400
- raise ValueError(
401
- "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
402
- "make sure to use `sdpa` in the mean time, and open an issue at "
403
- "https://github.com/huggingface/transformers"
404
  )
405
 
 
 
 
406
  output_attentions = False
407
 
408
  bsz, q_len, _ = hidden_states.size()
@@ -425,61 +461,37 @@ class InternLM2FlashAttention2(InternLM2Attention):
425
  key_states = key_states.transpose(1, 2)
426
  value_states = value_states.transpose(1, 2)
427
 
428
- cos, sin = self.rotary_emb(value_states, position_ids)
429
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
 
 
 
 
 
430
 
431
  if past_key_value is not None:
432
- # sin and cos are specific to RoPE models; cache_position needed for the static cache
433
- cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
434
- key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 
 
435
 
436
- # TODO: These transpose are quite inefficient but Flash Attention requires the layout
437
- # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
438
- # to be able to avoid many of these transpose/reshape/view.
439
  query_states = query_states.transpose(1, 2)
440
  key_states = key_states.transpose(1, 2)
441
  value_states = value_states.transpose(1, 2)
442
 
443
- # dropout_rate = self.attention_dropout if self.training else 0.0
444
- dropout_rate = 0.0
445
-
446
- # In PEFT, usually we cast the layer norms in float32 for training stability reasons
447
- # therefore the input hidden states gets silently casted in float32. Hence, we need
448
- # cast them back in the correct dtype just to be sure everything works as expected.
449
- # This might slowdown training & inference so it is recommended to not cast the LayerNorms
450
- # in fp32. (InternLM2RMSNorm handles it correctly)
451
-
452
- input_dtype = query_states.dtype
453
- if input_dtype == torch.float32:
454
- if torch.is_autocast_enabled():
455
- target_dtype = torch.get_autocast_gpu_dtype()
456
- # Handle the case where the model is quantized
457
- elif hasattr(self.config, "_pre_quantization_dtype"):
458
- target_dtype = self.config._pre_quantization_dtype
459
- else:
460
- target_dtype = self.wqkv.weight.dtype
461
-
462
- logger.warning_once(
463
- f"The input hidden states seems to be silently casted in float32, this might be related to"
464
- f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
465
- f" {target_dtype}."
466
- )
467
-
468
- query_states = query_states.to(target_dtype)
469
- key_states = key_states.to(target_dtype)
470
- value_states = value_states.to(target_dtype)
471
 
472
  attn_output = self._flash_attention_forward(
473
  query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
474
  )
475
-
476
  attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
477
  attn_output = self.wo(attn_output)
478
 
479
  if not output_attentions:
480
  attn_weights = None
481
 
482
- return attn_output, attn_weights, past_key_value # pylint: disable=E0606
483
 
484
  def _flash_attention_forward(
485
  self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
@@ -498,29 +510,23 @@ class InternLM2FlashAttention2(InternLM2Attention):
498
  attention_mask (`torch.Tensor`):
499
  The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
500
  position of padding tokens and 1 for the position of non-padding tokens.
501
- dropout (`float`):
502
  Attention dropout
503
  softmax_scale (`float`, *optional*):
504
  The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
505
  """
506
- if not self._flash_attn_uses_top_left_mask:
507
- causal = self.is_causal
508
- else:
509
- # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1.
510
- # For details, please see the comment in InternLM2FlashAttention2 __init__.
511
- causal = self.is_causal and query_length != 1
512
-
513
  # Contains at least one padding token in the sequence
 
514
  if attention_mask is not None:
515
  batch_size = query_states.shape[0]
516
- query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
517
  query_states, key_states, value_states, attention_mask, query_length
518
  )
519
 
520
  cu_seqlens_q, cu_seqlens_k = cu_seq_lens
521
  max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
522
 
523
- attn_output_unpad = flash_attn_varlen_func( # pylint: disable=E0606
524
  query_states,
525
  key_states,
526
  value_states,
@@ -533,26 +539,27 @@ class InternLM2FlashAttention2(InternLM2Attention):
533
  causal=causal,
534
  )
535
 
536
- attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) # pylint: disable=E0606
537
  else:
538
- attn_output = flash_attn_func( # pylint: disable=E0606
539
  query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
540
  )
541
 
542
  return attn_output
543
 
544
- def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
545
  indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
546
  batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
547
 
548
- key_layer = index_first_axis( # pylint: disable=E0606
549
  key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
550
  )
551
- value_layer = index_first_axis( # pylint: disable=E0606
552
  value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
553
  )
 
554
  if query_length == kv_seq_len:
555
- query_layer = index_first_axis( # pylint: disable=E0606
556
  query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
557
  )
558
  cu_seqlens_q = cu_seqlens_k
@@ -568,139 +575,29 @@ class InternLM2FlashAttention2(InternLM2Attention):
568
  else:
569
  # The -q_len: slice assumes left padding.
570
  attention_mask = attention_mask[:, -query_length:]
571
- query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input( # pylint: disable=E0606
572
- query_layer, attention_mask
573
- )
574
 
575
  return (
576
  query_layer,
577
  key_layer,
578
  value_layer,
579
- indices_q,
580
  (cu_seqlens_q, cu_seqlens_k),
581
  (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
582
  )
583
 
584
-
585
- # Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->InternLM2
586
- class InternLM2SdpaAttention(InternLM2Attention):
587
- """
588
- InternLM2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
589
- `InternLM2Attention` as the weights of the module stays untouched. The only changes are on the forward pass
590
- to adapt to SDPA API.
591
- """
592
-
593
- # Adapted from InternLM2Attention.forward
594
- def forward(
595
- self,
596
- hidden_states: torch.Tensor,
597
- attention_mask: Optional[torch.Tensor] = None,
598
- position_ids: Optional[torch.LongTensor] = None,
599
- past_key_value: Optional[Cache] = None,
600
- output_attentions: bool = False,
601
- use_cache: bool = False,
602
- cache_position: Optional[torch.LongTensor] = None,
603
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
604
- if output_attentions:
605
- # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"`
606
- # once this is implemented.
607
- logger.warning_once(
608
- "InternLM2Model uses InternLM2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` "
609
- "does not support `output_attentions=True`. Falling back to the manual attention implementation, "
610
- "but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
611
- 'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
612
- )
613
- return super().forward(
614
- hidden_states=hidden_states,
615
- attention_mask=attention_mask,
616
- position_ids=position_ids,
617
- past_key_value=past_key_value,
618
- output_attentions=output_attentions,
619
- use_cache=use_cache,
620
- cache_position=cache_position,
621
- )
622
-
623
- bsz, q_len, _ = hidden_states.size()
624
-
625
- qkv_states = self.wqkv(hidden_states)
626
-
627
- qkv_states = rearrange(
628
- qkv_states,
629
- "b q (h gs d) -> b q h gs d",
630
- gs=2 + self.num_key_value_groups,
631
- d=self.head_dim,
632
- )
633
-
634
- query_states = qkv_states[..., : self.num_key_value_groups, :]
635
- query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
636
- key_states = qkv_states[..., -2, :]
637
- value_states = qkv_states[..., -1, :]
638
-
639
- query_states = query_states.transpose(1, 2)
640
- key_states = key_states.transpose(1, 2)
641
- value_states = value_states.transpose(1, 2)
642
-
643
- cos, sin = self.rotary_emb(value_states, position_ids)
644
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
645
-
646
- if past_key_value is not None:
647
- # sin and cos are specific to RoPE models; cache_position needed for the static cache
648
- cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
649
- key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
650
-
651
- key_states = repeat_kv(key_states, self.num_key_value_groups)
652
- value_states = repeat_kv(value_states, self.num_key_value_groups)
653
-
654
- causal_mask = attention_mask
655
- if attention_mask is not None:
656
- causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
657
-
658
- # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with
659
- # custom attn_mask, Reference: https://github.com/pytorch/pytorch/issues/112577.
660
- if query_states.device.type == "cuda" and causal_mask is not None:
661
- query_states = query_states.contiguous()
662
- key_states = key_states.contiguous()
663
- value_states = value_states.contiguous()
664
-
665
- # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of
666
- # an inline conditional assignment in SDPA to support both torch.compile's dynamic shapes and full graph
667
- # options. An inline conditional prevents dynamic shapes from compiling.
668
- is_causal = bool(causal_mask is None and q_len > 1)
669
-
670
- attn_output = torch.nn.functional.scaled_dot_product_attention( # pylint: disable=E1102
671
- query_states,
672
- key_states,
673
- value_states,
674
- attn_mask=causal_mask,
675
- dropout_p=0.0,
676
- is_causal=is_causal,
677
- )
678
-
679
- attn_output = attn_output.transpose(1, 2).contiguous()
680
- attn_output = attn_output.view(bsz, q_len, self.hidden_size)
681
-
682
- attn_output = self.wo(attn_output)
683
-
684
- return attn_output, None, past_key_value
685
-
686
-
687
  INTERNLM2_ATTENTION_CLASSES = {
688
  "eager": InternLM2Attention,
689
  "flash_attention_2": InternLM2FlashAttention2,
690
- "sdpa": InternLM2SdpaAttention,
691
  }
692
 
693
-
694
- # Modified from transformers.models.llama.modeling_llama.LlamaDecoderLayer with Llama->InternLM2
695
  class InternLM2DecoderLayer(nn.Module):
696
- """InternLM2 Decoder Layer. This module is a single layer of the InternLM2 model."""
697
-
698
- def __init__(self, config: InternLM2Config, layer_idx: int):
699
  super().__init__()
700
  self.hidden_size = config.hidden_size
701
- self.layer_idx = layer_idx
702
 
703
- self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config, layer_idx=layer_idx)
704
 
705
  self.feed_forward = InternLM2MLP(config)
706
  self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@@ -711,10 +608,10 @@ class InternLM2DecoderLayer(nn.Module):
711
  hidden_states: torch.Tensor,
712
  attention_mask: Optional[torch.Tensor] = None,
713
  position_ids: Optional[torch.LongTensor] = None,
714
- past_key_value: Optional[Cache] = None,
715
  output_attentions: Optional[bool] = False,
716
  use_cache: Optional[bool] = False,
717
- cache_position: Optional[torch.LongTensor] = None,
718
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
719
  """
720
  Args:
@@ -730,6 +627,12 @@ class InternLM2DecoderLayer(nn.Module):
730
  (see `past_key_values`).
731
  past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
732
  """
 
 
 
 
 
 
733
  residual = hidden_states
734
 
735
  hidden_states = self.attention_norm(hidden_states)
@@ -742,7 +645,7 @@ class InternLM2DecoderLayer(nn.Module):
742
  past_key_value=past_key_value,
743
  output_attentions=output_attentions,
744
  use_cache=use_cache,
745
- cache_position=cache_position,
746
  )
747
  hidden_states = residual + hidden_states
748
 
@@ -786,20 +689,11 @@ InternLM2_START_DOCSTRING = r"""
786
  InternLM2_START_DOCSTRING,
787
  )
788
  class InternLM2PreTrainedModel(PreTrainedModel):
789
- """
790
- InternLM2 pretrained model's base class.
791
- """
792
-
793
  config_class = InternLM2Config
794
  base_model_prefix = "model"
795
  supports_gradient_checkpointing = True
796
  _no_split_modules = ["InternLM2DecoderLayer"]
797
- _skip_keys_device_placement = ["past_key_values"]
798
- _supports_flash_attn_2 = True
799
- _supports_sdpa = True
800
- _supports_cache_class = True
801
- _supports_quantized_cache = True
802
- _supports_static_cache = True
803
 
804
  def _init_weights(self, module):
805
  std = self.config.initializer_range
@@ -848,19 +742,14 @@ InternLM2_INPUTS_DOCSTRING = r"""
848
  config.n_positions - 1]`.
849
 
850
  [What are position IDs?](../glossary#position-ids)
851
- past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
852
- Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
853
- blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
854
- returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
855
-
856
- Two formats are allowed:
857
- - a [`~cache_utils.Cache`] instance;
858
- - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
859
- shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
860
- cache format.
861
 
862
- The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
863
- legacy cache format will be returned.
864
 
865
  If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
866
  have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
@@ -880,14 +769,10 @@ InternLM2_INPUTS_DOCSTRING = r"""
880
  more detail.
881
  return_dict (`bool`, *optional*):
882
  Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
883
- cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
884
- Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
885
- this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
886
- the complete sequence length.
887
  """
888
 
889
 
890
- # Modified from transformers.models.llama.modeling_llama.LlamaModel with Llama->InternLM2
891
  @add_start_docstrings(
892
  "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
893
  InternLM2_START_DOCSTRING,
@@ -910,9 +795,7 @@ class InternLM2Model(InternLM2PreTrainedModel):
910
 
911
  self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
912
 
913
- self.layers = nn.ModuleList(
914
- [InternLM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
915
- )
916
  self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
917
 
918
  self.gradient_checkpointing = False
@@ -925,96 +808,142 @@ class InternLM2Model(InternLM2PreTrainedModel):
925
  def set_input_embeddings(self, value):
926
  self.tok_embeddings = value
927
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
928
  @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
929
  def forward(
930
  self,
931
  input_ids: torch.LongTensor = None,
932
  attention_mask: Optional[torch.Tensor] = None,
933
  position_ids: Optional[torch.LongTensor] = None,
934
- past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
935
  inputs_embeds: Optional[torch.FloatTensor] = None,
936
  use_cache: Optional[bool] = None,
937
  output_attentions: Optional[bool] = None,
938
  output_hidden_states: Optional[bool] = None,
939
  return_dict: Optional[bool] = None,
940
- cache_position: Optional[torch.LongTensor] = None,
941
  ) -> Union[Tuple, BaseModelOutputWithPast]:
942
  output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
943
  output_hidden_states = (
944
  output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
945
  )
946
  use_cache = use_cache if use_cache is not None else self.config.use_cache
 
947
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
948
 
949
- if (input_ids is None) ^ (inputs_embeds is not None):
950
- raise ValueError(
951
- "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
952
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
953
 
954
- if self.gradient_checkpointing and self.training and use_cache:
955
- logger.warning_once(
956
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
 
957
  )
958
- use_cache = False
959
 
960
  if inputs_embeds is None:
961
  inputs_embeds = self.tok_embeddings(input_ids)
962
 
963
- return_legacy_cache = False
964
- if use_cache and not isinstance(past_key_values, Cache): # kept for BC (non `Cache` `past_key_values` inputs)
965
- return_legacy_cache = True
966
- past_key_values = DynamicCache.from_legacy_cache(past_key_values)
967
-
968
- if cache_position is None:
969
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
970
- cache_position = torch.arange(
971
- past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
 
972
  )
973
- if position_ids is None:
974
- position_ids = cache_position.unsqueeze(0)
975
-
976
- causal_mask = self._update_causal_mask(
977
- attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
978
- )
979
 
980
  # embed positions
981
  hidden_states = inputs_embeds
982
 
 
 
 
 
 
 
 
983
  # decoder layers
984
  all_hidden_states = () if output_hidden_states else None
985
  all_self_attns = () if output_attentions else None
986
- next_decoder_cache = None
987
 
988
- for decoder_layer in self.layers:
989
  if output_hidden_states:
990
  all_hidden_states += (hidden_states,)
991
 
 
 
992
  if self.gradient_checkpointing and self.training:
993
- layer_outputs = self._gradient_checkpointing_func(
994
- decoder_layer.__call__,
 
 
 
 
 
 
 
 
995
  hidden_states,
996
- causal_mask,
997
  position_ids,
998
- past_key_values,
999
- output_attentions,
1000
- use_cache,
1001
- cache_position,
1002
  )
1003
  else:
1004
  layer_outputs = decoder_layer(
1005
  hidden_states,
1006
- attention_mask=causal_mask,
1007
  position_ids=position_ids,
1008
- past_key_value=past_key_values,
1009
  output_attentions=output_attentions,
1010
  use_cache=use_cache,
1011
- cache_position=cache_position,
1012
  )
1013
 
1014
  hidden_states = layer_outputs[0]
1015
 
1016
  if use_cache:
1017
- next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1018
 
1019
  if output_attentions:
1020
  all_self_attns += (layer_outputs[1],)
@@ -1026,9 +955,6 @@ class InternLM2Model(InternLM2PreTrainedModel):
1026
  all_hidden_states += (hidden_states,)
1027
 
1028
  next_cache = next_decoder_cache if use_cache else None
1029
- if return_legacy_cache:
1030
- next_cache = next_cache.to_legacy_cache()
1031
-
1032
  if not return_dict:
1033
  return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1034
  return BaseModelOutputWithPast(
@@ -1038,91 +964,11 @@ class InternLM2Model(InternLM2PreTrainedModel):
1038
  attentions=all_self_attns,
1039
  )
1040
 
1041
- def _update_causal_mask(
1042
- self,
1043
- attention_mask: torch.Tensor,
1044
- input_tensor: torch.Tensor,
1045
- cache_position: torch.Tensor,
1046
- past_key_values: Cache,
1047
- output_attentions: bool,
1048
- ):
1049
- # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length
1050
- # even when the static KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at
1051
- # each decode steps due to the dynamic shapes. (`recording cudagraph tree for symint key 13`, etc.), which is
1052
- # VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using `fullgraph=True`.
1053
- # See more context in https://github.com/huggingface/transformers/pull/29114
1054
-
1055
- if self.config.attn_implementation == "flash_attention_2":
1056
- if attention_mask is not None and 0.0 in attention_mask:
1057
- return attention_mask
1058
- return None
1059
-
1060
- # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1061
- # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1062
- # to infer the attention mask.
1063
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1064
- using_static_cache = isinstance(past_key_values, StaticCache)
1065
-
1066
- # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1067
- if self.config.attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1068
- if AttentionMaskConverter._ignore_causal_mask_sdpa(
1069
- attention_mask,
1070
- inputs_embeds=input_tensor,
1071
- past_key_values_length=past_seen_tokens,
1072
- is_training=self.training,
1073
- ):
1074
- return None
1075
-
1076
- dtype, device = input_tensor.dtype, input_tensor.device
1077
- min_dtype = torch.finfo(dtype).min
1078
- sequence_length = input_tensor.shape[1]
1079
- if using_static_cache:
1080
- target_length = past_key_values.get_max_length()
1081
- else:
1082
- target_length = (
1083
- attention_mask.shape[-1]
1084
- if isinstance(attention_mask, torch.Tensor)
1085
- else past_seen_tokens + sequence_length + 1
1086
- )
1087
 
1088
- if attention_mask is not None and attention_mask.dim() == 4:
1089
- # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
1090
- if attention_mask.max() != 0:
1091
- raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`")
1092
- causal_mask = attention_mask
1093
- else:
1094
- causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1095
- if sequence_length != 1:
1096
- causal_mask = torch.triu(causal_mask, diagonal=1)
1097
- causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1098
- causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1099
- if attention_mask is not None:
1100
- causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1101
- mask_length = attention_mask.shape[-1]
1102
- padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1103
- padding_mask = padding_mask == 0
1104
- causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1105
- padding_mask, min_dtype
1106
- )
1107
- if (
1108
- self.config.attn_implementation == "sdpa"
1109
- and attention_mask is not None
1110
- and attention_mask.device.type == "cuda"
1111
- and not output_attentions
1112
- ):
1113
- # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1114
- # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1115
- # Details: https://github.com/pytorch/pytorch/issues/110213
1116
- causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) # pylint: disable=E1120
1117
-
1118
- return causal_mask
1119
-
1120
-
1121
- # Modified from transformers.models.llama.modeling_llama.LlamaForCausalLM
1122
  class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1123
- """Causal language model (CLM) for InternLM2."""
1124
-
1125
  _auto_class = "AutoModelForCausalLM"
 
1126
  _tied_weights_keys = ["output.weight"]
1127
 
1128
  def __init__(self, config):
@@ -1159,14 +1005,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1159
  input_ids: torch.LongTensor = None,
1160
  attention_mask: Optional[torch.Tensor] = None,
1161
  position_ids: Optional[torch.LongTensor] = None,
1162
- past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1163
  inputs_embeds: Optional[torch.FloatTensor] = None,
1164
  labels: Optional[torch.LongTensor] = None,
1165
  use_cache: Optional[bool] = None,
1166
  output_attentions: Optional[bool] = None,
1167
  output_hidden_states: Optional[bool] = None,
1168
  return_dict: Optional[bool] = None,
1169
- cache_position: Optional[torch.LongTensor] = None,
1170
  ) -> Union[Tuple, CausalLMOutputWithPast]:
1171
  r"""
1172
  Args:
@@ -1182,8 +1027,8 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1182
  ```python
1183
  >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1184
 
1185
- >>> model = InternLM2ForCausalLM.from_pretrained("meta-InternLM2/InternLM2-2-7b-hf")
1186
- >>> tokenizer = AutoTokenizer.from_pretrained("meta-InternLM2/InternLM2-2-7b-hf")
1187
 
1188
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
1189
  >>> inputs = tokenizer(prompt, return_tensors="pt")
@@ -1211,19 +1056,10 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1211
  output_attentions=output_attentions,
1212
  output_hidden_states=output_hidden_states,
1213
  return_dict=return_dict,
1214
- cache_position=cache_position,
1215
  )
1216
 
1217
  hidden_states = outputs[0]
1218
- if self.config.pretraining_tp > 1:
1219
- output_slices = self.output.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1220
- logits = [
1221
- F.linear(hidden_states, output_slices[i]) # pylint: disable=not-callable
1222
- for i in range(self.config.pretraining_tp)
1223
- ]
1224
- logits = torch.cat(logits, dim=-1)
1225
- else:
1226
- logits = self.output(hidden_states)
1227
  logits = logits.float()
1228
 
1229
  loss = None
@@ -1252,48 +1088,19 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1252
  )
1253
 
1254
  def prepare_inputs_for_generation(
1255
- self,
1256
- input_ids,
1257
- past_key_values=None,
1258
- attention_mask=None,
1259
- inputs_embeds=None,
1260
- cache_position=None,
1261
- use_cache=True,
1262
- **kwargs,
1263
  ):
1264
- past_length = 0
1265
  if past_key_values is not None:
1266
- if isinstance(past_key_values, Cache):
1267
- past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
1268
- max_cache_length = (
1269
- torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
1270
- if past_key_values.get_max_length() is not None
1271
- else None
1272
- )
1273
- cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
1274
- # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
1275
  else:
1276
- cache_length = past_length = past_key_values[0][0].shape[2]
1277
- max_cache_length = None
1278
-
1279
- # Keep only the unprocessed tokens:
1280
- # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1281
- # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as input)
1282
- if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1283
- input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1284
- # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1285
- # input_ids based on the past_length.
1286
- elif past_length < input_ids.shape[1]:
1287
- input_ids = input_ids[:, past_length:]
1288
- # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1289
-
1290
- # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1291
- if (
1292
- max_cache_length is not None
1293
- and attention_mask is not None
1294
- and cache_length + input_ids.shape[1] > max_cache_length
1295
- ):
1296
- attention_mask = attention_mask[:, -max_cache_length:] # pylint: disable=E1130
1297
 
1298
  position_ids = kwargs.get("position_ids", None)
1299
  if attention_mask is not None and position_ids is None:
@@ -1307,24 +1114,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1307
  if inputs_embeds is not None and past_key_values is None:
1308
  model_inputs = {"inputs_embeds": inputs_embeds}
1309
  else:
1310
- # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1311
- # recompiles graphs as the stride of the inputs is a guard.
1312
- # Ref: https://github.com/huggingface/transformers/pull/29114
1313
- # TODO: use `next_tokens` directly instead.
1314
- model_inputs = {"input_ids": input_ids.contiguous()}
1315
-
1316
- input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1317
- if cache_position is None:
1318
- cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1319
- elif use_cache:
1320
- cache_position = cache_position[-input_length:]
1321
 
1322
  model_inputs.update(
1323
  {
1324
  "position_ids": position_ids,
1325
- "cache_position": cache_position,
1326
  "past_key_values": past_key_values,
1327
- "use_cache": use_cache,
1328
  "attention_mask": attention_mask,
1329
  }
1330
  )
@@ -1339,18 +1135,15 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1339
  )
1340
  return reordered_past
1341
 
1342
- def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, meta_instruction=""):
1343
- if history is None:
1344
- history = []
1345
- if tokenizer.add_bos_token:
1346
- prompt = ""
1347
- else:
1348
- prompt = tokenizer.bos_token
1349
  if meta_instruction:
1350
- prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
 
 
1351
  for record in history:
1352
- prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1353
- prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1354
  return tokenizer([prompt], return_tensors="pt")
1355
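For reference, `build_inputs` assembles a ChatML-style prompt from the system instruction, the conversation history, and the new query; a plain-string sketch of the layout (BOS handling omitted; the instruction, history, and query values here are made up):

```python
meta_instruction = "You are a helpful assistant."     # illustrative system prompt
history = [("Hi", "Hello! How can I help you?")]      # one previous (query, response) turn
query = "What is InternLM2?"

prompt = ""
if meta_instruction:
    prompt += f"<|im_start|>system\n{meta_instruction}<|im_end|>\n"
for user_turn, assistant_turn in history:
    prompt += f"<|im_start|>user\n{user_turn}<|im_end|>\n<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
prompt += f"<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"

print(prompt)   # the model is expected to continue after the final assistant tag
```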
 
1356
  @torch.no_grad()
@@ -1358,25 +1151,21 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1358
  self,
1359
  tokenizer,
1360
  query: str,
1361
- history: Optional[List[Tuple[str, str]]] = None,
1362
  streamer: Optional[BaseStreamer] = None,
1363
  max_new_tokens: int = 1024,
1364
  do_sample: bool = True,
1365
  temperature: float = 0.8,
1366
  top_p: float = 0.8,
1367
  meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
1368
- "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory "
1369
- "(上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
1370
- "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such "
1371
- "as English and 中文.",
1372
  **kwargs,
1373
  ):
1374
- if history is None:
1375
- history = []
1376
  inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1377
  inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1378
  # also add end-of-assistant token in eos token id to avoid unnecessary generation
1379
- eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["<|im_end|>"])[0]]
1380
  outputs = self.generate(
1381
  **inputs,
1382
  streamer=streamer,
@@ -1389,7 +1178,7 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1389
  )
1390
  outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
1391
  response = tokenizer.decode(outputs, skip_special_tokens=True)
1392
- response = response.split("<|im_end|>")[0]
1393
  history = history + [(query, response)]
1394
  return response, history
1395
 
@@ -1398,15 +1187,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1398
  self,
1399
  tokenizer,
1400
  query: str,
1401
- history: List[Tuple[str, str]] = None,
1402
  max_new_tokens: int = 1024,
1403
  do_sample: bool = True,
1404
  temperature: float = 0.8,
1405
  top_p: float = 0.8,
1406
  **kwargs,
1407
  ):
1408
- if history is None:
1409
- history = []
1410
  """
1411
  Return a generator in format: (response, history)
1412
  Eg.
@@ -1422,10 +1209,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1422
  response_queue = queue.Queue(maxsize=20)
1423
 
1424
  class ChatStreamer(BaseStreamer):
1425
- """
1426
- Streamer used in generate to print words one by one.
1427
- """
1428
-
1429
  def __init__(self, tokenizer) -> None:
1430
  super().__init__()
1431
  self.tokenizer = tokenizer
@@ -1433,7 +1216,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1433
  self.query = query
1434
  self.history = history
1435
  self.response = ""
1436
- self.cache = []
1437
  self.received_inputs = False
1438
  self.queue.put((self.response, history + [(self.query, self.response)]))
1439
 
@@ -1448,15 +1230,11 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1448
  self.received_inputs = True
1449
  return
1450
 
1451
- self.cache.extend(value.tolist())
1452
- token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1453
- if token.strip() != "<|im_end|>":
1454
  self.response = self.response + token
1455
  history = self.history + [(self.query, self.response)]
1456
  self.queue.put((self.response, history))
1457
- self.cache = []
1458
- else:
1459
- self.end()
1460
 
1461
  def end(self):
1462
  self.queue.put(None)
@@ -1486,13 +1264,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1486
  return consumer()
1487
 
1488
 
1489
- # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1490
  @add_start_docstrings(
1491
  """
1492
  The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1493
 
1494
- [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1495
- (e.g. GPT-2) do.
1496
 
1497
  Since it does classification on the last token, it requires to know the position of the last token. If a
1498
  `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
@@ -1503,8 +1281,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1503
  InternLM2_START_DOCSTRING,
1504
  )
1505
  class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1506
- """Sequence Classification Head for InternLM2 Model."""
1507
-
1508
  def __init__(self, config):
1509
  super().__init__(config)
1510
  self.num_labels = config.num_labels
@@ -1526,7 +1302,7 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1526
  input_ids: torch.LongTensor = None,
1527
  attention_mask: Optional[torch.Tensor] = None,
1528
  position_ids: Optional[torch.LongTensor] = None,
1529
- past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1530
  inputs_embeds: Optional[torch.FloatTensor] = None,
1531
  labels: Optional[torch.LongTensor] = None,
1532
  use_cache: Optional[bool] = None,
@@ -1567,10 +1343,9 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1567
  sequence_lengths = -1
1568
  else:
1569
  if input_ids is not None:
1570
- # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1571
- sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1572
- sequence_lengths = sequence_lengths % input_ids.shape[-1]
1573
- sequence_lengths = sequence_lengths.to(logits.device)
1574
  else:
1575
  sequence_lengths = -1
1576
 
@@ -1582,7 +1357,7 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1582
  if self.config.problem_type is None:
1583
  if self.num_labels == 1:
1584
  self.config.problem_type = "regression"
1585
- elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)):
1586
  self.config.problem_type = "single_label_classification"
1587
  else:
1588
  self.config.problem_type = "multi_label_classification"
@@ -1610,191 +1385,3 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1610
  hidden_states=transformer_outputs.hidden_states,
1611
  attentions=transformer_outputs.attentions,
1612
  )
1613
-
1614
-
1615
- # Copied from transformers.models.llama.modeling_llama.LlamaForQuestionAnswering with Llama->InternLM2
1616
- @add_start_docstrings(
1617
- """
1618
- The InternLM2 Model transformer with a span classification head on top for extractive question-answering tasks like
1619
- SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1620
- """,
1621
- InternLM2_START_DOCSTRING,
1622
- )
1623
- class InternLM2ForQuestionAnswering(InternLM2PreTrainedModel):
1624
- """Question Answering model for InternLM2."""
1625
-
1626
- base_model_prefix = "transformer"
1627
-
1628
- def __init__(self, config):
1629
- super().__init__(config)
1630
- self.transformer = InternLM2Model(config)
1631
- self.qa_outputs = nn.Linear(config.hidden_size, 2)
1632
-
1633
- # Initialize weights and apply final processing
1634
- self.post_init()
1635
-
1636
- def get_input_embeddings(self):
1637
- return self.transformer.tok_embeddings
1638
-
1639
- def set_input_embeddings(self, value):
1640
- self.transformer.tok_embeddings = value
1641
-
1642
- @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1643
- def forward(
1644
- self,
1645
- input_ids: Optional[torch.LongTensor] = None,
1646
- attention_mask: Optional[torch.FloatTensor] = None,
1647
- position_ids: Optional[torch.LongTensor] = None,
1648
- past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1649
- inputs_embeds: Optional[torch.FloatTensor] = None,
1650
- start_positions: Optional[torch.LongTensor] = None,
1651
- end_positions: Optional[torch.LongTensor] = None,
1652
- output_attentions: Optional[bool] = None,
1653
- output_hidden_states: Optional[bool] = None,
1654
- return_dict: Optional[bool] = None,
1655
- ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1656
- r"""
1657
- start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1658
- Labels for position (index) of the start of the labelled span for computing the token classification loss.
1659
- Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1660
- are not taken into account for computing the loss.
1661
- end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1662
- Labels for position (index) of the end of the labelled span for computing the token classification loss.
1663
- Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1664
- are not taken into account for computing the loss.
1665
- """
1666
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1667
-
1668
- outputs = self.transformer(
1669
- input_ids,
1670
- attention_mask=attention_mask,
1671
- position_ids=position_ids,
1672
- past_key_values=past_key_values,
1673
- inputs_embeds=inputs_embeds,
1674
- output_attentions=output_attentions,
1675
- output_hidden_states=output_hidden_states,
1676
- return_dict=return_dict,
1677
- )
1678
-
1679
- sequence_output = outputs[0]
1680
-
1681
- logits = self.qa_outputs(sequence_output)
1682
- start_logits, end_logits = logits.split(1, dim=-1)
1683
- start_logits = start_logits.squeeze(-1).contiguous()
1684
- end_logits = end_logits.squeeze(-1).contiguous()
1685
-
1686
- total_loss = None
1687
- if start_positions is not None and end_positions is not None:
1688
- # If we are on multi-GPU, split add a dimension
1689
- if len(start_positions.size()) > 1:
1690
- start_positions = start_positions.squeeze(-1).to(start_logits.device)
1691
- if len(end_positions.size()) > 1:
1692
- end_positions = end_positions.squeeze(-1).to(end_logits.device)
1693
- # sometimes the start/end positions are outside our model inputs, we ignore these terms
1694
- ignored_index = start_logits.size(1)
1695
- start_positions = start_positions.clamp(0, ignored_index)
1696
- end_positions = end_positions.clamp(0, ignored_index)
1697
-
1698
- loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1699
- start_loss = loss_fct(start_logits, start_positions)
1700
- end_loss = loss_fct(end_logits, end_positions)
1701
- total_loss = (start_loss + end_loss) / 2
1702
-
1703
- if not return_dict:
1704
- output = (start_logits, end_logits) + outputs[2:]
1705
- return ((total_loss,) + output) if total_loss is not None else output
1706
-
1707
- return QuestionAnsweringModelOutput(
1708
- loss=total_loss,
1709
- start_logits=start_logits,
1710
- end_logits=end_logits,
1711
- hidden_states=outputs.hidden_states,
1712
- attentions=outputs.attentions,
1713
- )
1714
-
1715
-
1716
- # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->InternLM2
1717
- @add_start_docstrings(
1718
- """
1719
- The InternLM2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1720
- output) e.g. for Named-Entity-Recognition (NER) tasks.
1721
- """,
1722
- InternLM2_START_DOCSTRING,
1723
- )
1724
- class InternLM2ForTokenClassification(InternLM2PreTrainedModel):
1725
- """Token classification model for InternLM2."""
1726
-
1727
- def __init__(self, config):
1728
- super().__init__(config)
1729
- self.num_labels = config.num_labels
1730
- self.model = InternLM2Model(config)
1731
- if getattr(config, "classifier_dropout", None) is not None:
1732
- classifier_dropout = config.classifier_dropout
1733
- elif getattr(config, "hidden_dropout", None) is not None:
1734
- classifier_dropout = config.hidden_dropout
1735
- else:
1736
- classifier_dropout = 0.1
1737
- self.dropout = nn.Dropout(classifier_dropout)
1738
- self.score = nn.Linear(config.hidden_size, config.num_labels)
1739
-
1740
- # Initialize weights and apply final processing
1741
- self.post_init()
1742
-
1743
- def get_input_embeddings(self):
1744
- return self.model.tok_embeddings
1745
-
1746
- def set_input_embeddings(self, value):
1747
- self.model.tok_embeddings = value
1748
-
1749
- @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1750
- def forward(
1751
- self,
1752
- input_ids: torch.LongTensor = None,
1753
- attention_mask: Optional[torch.Tensor] = None,
1754
- position_ids: Optional[torch.LongTensor] = None,
1755
- past_key_values: Optional[List[torch.FloatTensor]] = None,
1756
- inputs_embeds: Optional[torch.FloatTensor] = None,
1757
- labels: Optional[torch.LongTensor] = None,
1758
- use_cache: Optional[bool] = None,
1759
- output_attentions: Optional[bool] = None,
1760
- output_hidden_states: Optional[bool] = None,
1761
- return_dict: Optional[bool] = None,
1762
- ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1763
- r"""
1764
- labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1765
- Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1766
- config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1767
- `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1768
- """
1769
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1770
-
1771
- outputs = self.model(
1772
- input_ids,
1773
- attention_mask=attention_mask,
1774
- position_ids=position_ids,
1775
- past_key_values=past_key_values,
1776
- inputs_embeds=inputs_embeds,
1777
- use_cache=use_cache,
1778
- output_attentions=output_attentions,
1779
- output_hidden_states=output_hidden_states,
1780
- return_dict=return_dict,
1781
- )
1782
- sequence_output = outputs[0]
1783
- sequence_output = self.dropout(sequence_output)
1784
- logits = self.score(sequence_output)
1785
-
1786
- loss = None
1787
- if labels is not None:
1788
- loss_fct = CrossEntropyLoss()
1789
- loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1790
-
1791
- if not return_dict:
1792
- output = (logits,) + outputs[2:]
1793
- return ((loss,) + output) if loss is not None else output
1794
-
1795
- return TokenClassifierOutput(
1796
- loss=loss,
1797
- logits=logits,
1798
- hidden_states=outputs.hidden_states,
1799
- attentions=outputs.attentions,
1800
- )
 
13
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
  # See the License for the specific language governing permissions and
15
  # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
  import math
18
  import queue
19
  import threading
20
+ import warnings
21
  from typing import List, Optional, Tuple, Union
22
 
23
  import torch
 
27
  from torch import nn
28
  from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
  from transformers.activations import ACT2FN
 
 
30
  from transformers.modeling_outputs import (
31
  BaseModelOutputWithPast,
32
  CausalLMOutputWithPast,
 
33
  SequenceClassifierOutputWithPast,
 
34
  )
35
  from transformers.modeling_utils import PreTrainedModel
 
36
  from transformers.utils import (
37
  add_start_docstrings,
38
  add_start_docstrings_to_model_forward,
 
39
  logging,
40
  replace_return_docstrings,
41
  )
42
 
43
  try:
44
  from transformers.generation.streamers import BaseStreamer
45
+ except: # noqa # pylint: disable=bare-except
46
  BaseStreamer = None
47
 
48
+ from .configuration_internlm import InternLMConfig as InternLM2Config
 
 
 
 
 
 
 
 
49
 
50
  logger = logging.get_logger(__name__)
51
 
52
  _CONFIG_FOR_DOC = "InternLM2Config"
53
 
54
+ flash_attn_func, flash_attn_varlen_func = None, None
55
+ pad_input, index_first_axis, unpad_input = None, None, None
56
+ def _import_flash_attn():
57
+ global flash_attn_func, flash_attn_varlen_func
58
+ global pad_input, index_first_axis, unpad_input
59
+ try:
60
+ from flash_attn import flash_attn_func as _flash_attn_func, flash_attn_varlen_func as _flash_attn_varlen_func
61
+ from flash_attn.bert_padding import pad_input as _pad_input, index_first_axis as _index_first_axis, unpad_input as _unpad_input
62
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
63
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
64
+ except ImportError:
65
+ raise ImportError("flash_attn is not installed.")
66
+
67
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
68
  def _get_unpad_data(attention_mask):
69
  seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
  indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
  max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
73
  return (
74
  indices,
75
  cu_seqlens,
 
77
  )
78
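For intuition about what `_get_unpad_data` feeds into the varlen flash-attention kernels, here is the same arithmetic run on a hand-written toy mask:

```python
import torch
import torch.nn.functional as F

# Batch of 2, max length 4; the second row has one padding position.
attention_mask = torch.tensor([[1, 1, 1, 1],
                               [1, 1, 1, 0]], dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)              # tensor([4, 3])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()   # flat positions of real tokens
max_seqlen_in_batch = seqlens_in_batch.max().item()                           # 4
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))

print(indices)      # tensor([0, 1, 2, 3, 4, 5, 6])
print(cu_seqlens)   # tensor([0, 4, 7]) -- cumulative sequence boundaries for flash_attn_varlen_func
```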
 
79
 
80
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
81
+ def _make_causal_mask(
82
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
83
+ ):
84
+ """
85
+ Make causal mask used for bi-directional self-attention.
86
+ """
87
+ bsz, tgt_len = input_ids_shape
88
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
89
+ mask_cond = torch.arange(mask.size(-1), device=device)
90
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
91
+ mask = mask.to(dtype)
92
+
93
+ if past_key_values_length > 0:
94
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
95
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
96
+
97
+
98
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
99
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
100
+ """
101
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
102
+ """
103
+ bsz, src_len = mask.size()
104
+ tgt_len = tgt_len if tgt_len is not None else src_len
105
+
106
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
107
+
108
+ inverted_mask = 1.0 - expanded_mask
109
+
110
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
111
+
112
 
113
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
114
+ class InternLM2RMSNorm(nn.Module):
115
  def __init__(self, hidden_size, eps=1e-6):
116
+ """
117
+ InternLM2RMSNorm is equivalent to T5LayerNorm
118
+ """
119
  super().__init__()
120
  self.weight = nn.Parameter(torch.ones(hidden_size))
121
  self.variance_epsilon = eps
 
128
  return self.weight * hidden_states.to(input_dtype)
129
 
130
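The forward pass elided above follows the standard Llama-style RMSNorm this class is copied from; a compact, self-contained restatement for reference (assuming that standard formulation):

```python
import torch

def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Scale by the inverse root-mean-square of the last dimension (computed in float32 for stability),
    # then apply the learned weight; unlike LayerNorm, no mean is subtracted and no bias is added.
    input_dtype = hidden_states.dtype
    hidden_states = hidden_states.to(torch.float32)
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    return weight * hidden_states.to(input_dtype)

x = torch.randn(2, 4, 8, dtype=torch.float16)
w = torch.ones(8, dtype=torch.float16)
print(rms_norm(x, w).shape)   # torch.Size([2, 4, 8])
```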
 
131
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
 
 
132
  class InternLM2RotaryEmbedding(nn.Module):
133
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
 
 
134
  super().__init__()
135
+
136
  self.dim = dim
137
  self.max_position_embeddings = max_position_embeddings
138
  self.base = base
139
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
140
  self.register_buffer("inv_freq", inv_freq, persistent=False)
 
 
141
 
142
+ # Build here to make `torch.jit.trace` work.
143
+ self._set_cos_sin_cache(
144
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
145
+ )
146
+
147
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
148
+ self.max_seq_len_cached = seq_len
149
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
150
+
151
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
153
+ emb = torch.cat((freqs, freqs), dim=-1)
154
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
155
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
156
+
157
+ def forward(self, x, seq_len=None):
158
  # x: [bs, num_attention_heads, seq_len, head_size]
159
+ if seq_len > self.max_seq_len_cached:
160
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
 
 
 
 
 
 
 
 
 
 
161
 
162
+ return (
163
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
164
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
165
+ )
166
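The cached `cos`/`sin` tables are indexed by absolute position and shared across heads; a small shape check of the cache construction above, using an illustrative `head_dim` of 8 and 16 positions:

```python
import torch

dim, max_pos, base = 8, 16, 10000
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))   # (dim/2,) frequencies
t = torch.arange(max_pos, dtype=inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, inv_freq)                          # (max_pos, dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                               # (max_pos, dim)
cos_cached, sin_cached = emb.cos(), emb.sin()
print(cos_cached.shape)   # torch.Size([16, 8]) -- one row per position, one column per rotary channel
```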
 
167
+
168
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
169
  class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
170
  """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
171
 
172
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173
+ self.scaling_factor = scaling_factor
174
+ super().__init__(dim, max_position_embeddings, base, device)
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
179
+ t = t / self.scaling_factor
180
+
181
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
182
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
183
+ emb = torch.cat((freqs, freqs), dim=-1)
184
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
186
 
187
 
188
+ # Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
189
  class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
190
  """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
191
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
192
+ """
193
+
194
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
195
+ self.scaling_factor = scaling_factor
196
+ super().__init__(dim, max_position_embeddings, base, device)
197
+
198
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
199
+ self.max_seq_len_cached = seq_len
200
 
 
 
 
201
  if seq_len > self.max_position_embeddings:
202
  base = self.base * (
203
  (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
204
  ) ** (self.dim / (self.dim - 2))
205
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
206
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
207
 
208
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
 
209
 
210
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
211
+ # Different from the paper, but it uses a different permutation in order to obtain the same calculation
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
215
 
216
+
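For intuition, a small sketch (all numbers assumed, not taken from any config) of the base adjustment that dynamic NTK scaling applies once `seq_len` exceeds `max_position_embeddings`:

```python
# Assumed toy values: head_dim=128, default RoPE base, 2k training context, 4k prompt.
dim, base = 128, 10000.0
max_position_embeddings, scaling_factor = 2048, 1.0
seq_len = 4096

adjusted_base = base * (
    (scaling_factor * seq_len / max_position_embeddings) - (scaling_factor - 1)
) ** (dim / (dim - 2))
print(round(adjusted_base))  # ~20221: a larger base lowers the inverse frequencies,
                             # stretching the rotary period to cover the longer sequence
```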
217
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
218
  def rotate_half(x):
219
  """Rotates half the hidden dims of the input."""
220
  x1 = x[..., : x.shape[-1] // 2]
 
222
  return torch.cat((-x2, x1), dim=-1)
223
 
224
 
225
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
226
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
227
+ """Applies Rotary Position Embedding to the query and key tensors."""
228
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
229
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
 
230
  q_embed = (q * cos) + (rotate_half(q) * sin)
231
  k_embed = (k * cos) + (rotate_half(k) * sin)
232
  return q_embed, k_embed
233
 
234
 
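A hypothetical usage sketch (toy shapes; assumes it runs in the same module so the rotary classes above are in scope) showing how the cached cos/sin tables are combined with `apply_rotary_pos_emb`:

```python
import torch

bsz, n_heads, seq_len, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, n_heads, seq_len, head_dim)
k = torch.randn(bsz, n_heads, seq_len, head_dim)

rope = InternLM2RotaryEmbedding(dim=head_dim, max_position_embeddings=16)
cos, sin = rope(q, seq_len=seq_len)                 # each: [seq_len, head_dim]
position_ids = torch.arange(seq_len).unsqueeze(0)   # [1, seq_len]
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```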
235
  class InternLM2MLP(nn.Module):
 
 
236
  def __init__(self, config):
237
  super().__init__()
238
  self.config = config
 
249
  return down_proj
250
 
251
 
252
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
253
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
254
  """
255
  This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
 
262
  return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
263
 
264
 
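As a quick sanity check (toy tensors; assumes the function above is in scope), `repeat_kv` matches `torch.repeat_interleave` along the head dimension, which is how the grouped key/value heads are broadcast to all query heads:

```python
import torch

kv = torch.randn(2, 4, 16, 64)                     # (batch, num_kv_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep=2)                  # -> (2, 8, 16, 64)
reference = torch.repeat_interleave(kv, repeats=2, dim=1)
assert torch.equal(expanded, reference)
```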
265
+ # Modified from transformers.models.llama.modeling_llama.LlamaAttention
266
  class InternLM2Attention(nn.Module):
267
  """Multi-headed attention from 'Attention Is All You Need' paper"""
268
 
269
+ def __init__(self, config: InternLM2Config):
270
  super().__init__()
271
  self.config = config
 
272
  self.hidden_size = config.hidden_size
273
  self.num_heads = config.num_attention_heads
274
  self.head_dim = self.hidden_size // self.num_heads
275
  self.num_key_value_heads = config.num_key_value_heads
276
  self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277
  self.max_position_embeddings = config.max_position_embeddings
 
278
  self.is_causal = True
279
 
280
  if (self.head_dim * self.num_heads) != self.hidden_size:
 
288
  (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
289
  bias=config.bias,
290
  )
 
291
 
292
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
293
  self._init_rope()
294
 
295
  def _init_rope(self):
 
297
  self.rotary_emb = InternLM2RotaryEmbedding(
298
  self.head_dim,
299
  max_position_embeddings=self.max_position_embeddings,
300
+ base=self.config.rope_theta,
301
  )
302
  else:
303
  scaling_type = self.config.rope_scaling["type"]
304
  scaling_factor = self.config.rope_scaling["factor"]
305
+ if scaling_type == "dynamic":
306
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
307
  self.head_dim,
308
  max_position_embeddings=self.max_position_embeddings,
309
+ base=self.config.rope_theta,
310
  scaling_factor=scaling_factor,
 
311
  )
312
+ elif scaling_type == "linear":
313
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
314
  self.head_dim,
315
  max_position_embeddings=self.max_position_embeddings,
316
+ base=self.config.rope_theta,
317
  scaling_factor=scaling_factor,
 
318
  )
319
  else:
320
+ raise ValueError("Currently we only support rotary embeddings of type 'dynamic' or 'linear'.")
321
+ return self.rotary_emb
322
+
323
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
324
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
325
 
326
  def forward(
327
  self,
328
  hidden_states: torch.Tensor,
329
  attention_mask: Optional[torch.Tensor] = None,
330
  position_ids: Optional[torch.LongTensor] = None,
331
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
332
  output_attentions: bool = False,
333
+ use_cache: bool = False,
334
+ **kwargs,
335
  ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
336
+ if "padding_mask" in kwargs:
337
+ warnings.warn(
338
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
339
+ "Please make sure to use `attention_mask` instead."
340
+ )
341
+
342
  bsz, q_len, _ = hidden_states.size()
343
 
344
+ qkv_states = self.wqkv(hidden_states)
 
345
 
346
  qkv_states = rearrange(
347
  qkv_states,
 
351
  )
352
 
353
  query_states = qkv_states[..., : self.num_key_value_groups, :]
354
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
355
+ key_states = qkv_states[..., -2, :]
356
+ value_states = qkv_states[..., -1, :]
357
 
358
+ query_states = query_states.transpose(1, 2)
359
+ key_states = key_states.transpose(1, 2)
360
+ value_states = value_states.transpose(1, 2)
361
+
362
+ kv_seq_len = key_states.shape[-2]
363
+ if past_key_value is not None:
364
+ kv_seq_len += past_key_value[0].shape[-2]
365
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
366
  query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
367
 
368
  if past_key_value is not None:
369
+ # reuse k, v, self_attention
370
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
371
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
372
+
373
+ past_key_value = (key_states, value_states) if use_cache else None
374
 
375
  key_states = repeat_kv(key_states, self.num_key_value_groups)
376
  value_states = repeat_kv(value_states, self.num_key_value_groups)
377
 
378
  attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
379
 
380
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
381
+ raise ValueError(
382
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
383
+ f" {attn_weights.size()}"
384
+ )
385
+
386
+ if attention_mask is not None:
387
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
388
+ raise ValueError(
389
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
390
+ )
391
+ attn_weights = attn_weights + attention_mask
392
 
393
  # upcast attention to fp32
394
  attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
 
401
  )
402
 
403
  attn_output = attn_output.transpose(1, 2).contiguous()
 
404
  attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
405
 
406
+ attn_output = self.wo(attn_output)
 
407
 
408
  if not output_attentions:
409
  attn_weights = None
 
411
  return attn_output, attn_weights, past_key_value
412
 
413
 
414
+ # Modified from transformers.models.llama.modeling_llama.LlamaFlashAttention2
415
  class InternLM2FlashAttention2(InternLM2Attention):
416
  """
417
  InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
 
419
  flash attention and deal with padding tokens in case the input contains any of them.
420
  """
421
 
422
  def forward(
423
  self,
424
  hidden_states: torch.Tensor,
425
  attention_mask: Optional[torch.LongTensor] = None,
426
  position_ids: Optional[torch.LongTensor] = None,
427
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
428
  output_attentions: bool = False,
429
  use_cache: bool = False,
430
+ **kwargs,
431
  ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432
+ # InternLM2FlashAttention2 attention does not support output_attentions
433
+ if "padding_mask" in kwargs:
434
+ warnings.warn(
435
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
436
+ "Please make sure to use `attention_mask` instead."
437
  )
438
 
439
+ # overwrite attention_mask with padding_mask
440
+ attention_mask = kwargs.pop("padding_mask")
441
+
442
  output_attentions = False
443
 
444
  bsz, q_len, _ = hidden_states.size()
 
461
  key_states = key_states.transpose(1, 2)
462
  value_states = value_states.transpose(1, 2)
463
 
464
+ kv_seq_len = key_states.shape[-2]
465
+ if past_key_value is not None:
466
+ kv_seq_len += past_key_value[0].shape[-2]
467
+
468
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
469
+
470
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
471
 
472
  if past_key_value is not None:
473
+ # reuse k, v, self_attention
474
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
475
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
476
+
477
+ past_key_value = (key_states, value_states) if use_cache else None
478
 
 
 
 
479
  query_states = query_states.transpose(1, 2)
480
  key_states = key_states.transpose(1, 2)
481
  value_states = value_states.transpose(1, 2)
482
 
483
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
 
484
 
485
  attn_output = self._flash_attention_forward(
486
  query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
487
  )
 
488
  attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
489
  attn_output = self.wo(attn_output)
490
 
491
  if not output_attentions:
492
  attn_weights = None
493
 
494
+ return attn_output, attn_weights, past_key_value
495
 
496
  def _flash_attention_forward(
497
  self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
 
510
  attention_mask (`torch.Tensor`):
511
  The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
512
  position of padding tokens and 1 for the position of non-padding tokens.
513
+ dropout (`float`, *optional*):
514
  Attention dropout
515
  softmax_scale (`float`, *optional*):
516
  The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
517
  """
 
 
 
 
 
 
 
518
  # Contains at least one padding token in the sequence
519
+ causal = self.is_causal and query_length != 1
520
  if attention_mask is not None:
521
  batch_size = query_states.shape[0]
522
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
523
  query_states, key_states, value_states, attention_mask, query_length
524
  )
525
 
526
  cu_seqlens_q, cu_seqlens_k = cu_seq_lens
527
  max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
528
 
529
+ attn_output_unpad = flash_attn_varlen_func(
530
  query_states,
531
  key_states,
532
  value_states,
 
539
  causal=causal,
540
  )
541
 
542
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
543
  else:
544
+ attn_output = flash_attn_func(
545
  query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
546
  )
547
 
548
  return attn_output
549
 
550
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
551
  indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
552
  batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
553
 
554
+ key_layer = index_first_axis(
555
  key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
556
  )
557
+ value_layer = index_first_axis(
558
  value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
559
  )
560
+
561
  if query_length == kv_seq_len:
562
+ query_layer = index_first_axis(
563
  query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
564
  )
565
  cu_seqlens_q = cu_seqlens_k
 
575
  else:
576
  # The -q_len: slice assumes left padding.
577
  attention_mask = attention_mask[:, -query_length:]
578
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
 
 
579
 
580
  return (
581
  query_layer,
582
  key_layer,
583
  value_layer,
584
+ indices_q.to(torch.int64),
585
  (cu_seqlens_q, cu_seqlens_k),
586
  (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
587
  )
588
 
589
  INTERNLM2_ATTENTION_CLASSES = {
590
  "eager": InternLM2Attention,
591
  "flash_attention_2": InternLM2FlashAttention2,
 
592
  }
593
 
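A small illustrative sketch (assumed value for `attn_implementation`; assumes the mapping above is in scope) of how this dictionary is used: the decoder layer looks up its attention class by the config's `attn_implementation` field.

```python
attn_implementation = "eager"   # "flash_attention_2" requires flash-attn to be installed
attention_cls = INTERNLM2_ATTENTION_CLASSES[attn_implementation]
print(attention_cls.__name__)   # InternLM2Attention
```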
594
+ # Modified from transformers.models.llama.modeling_llama.LlamaDecoderLayer
 
595
  class InternLM2DecoderLayer(nn.Module):
596
+ def __init__(self, config: InternLM2Config):
 
 
597
  super().__init__()
598
  self.hidden_size = config.hidden_size
 
599
 
600
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
601
 
602
  self.feed_forward = InternLM2MLP(config)
603
  self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
 
608
  hidden_states: torch.Tensor,
609
  attention_mask: Optional[torch.Tensor] = None,
610
  position_ids: Optional[torch.LongTensor] = None,
611
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
612
  output_attentions: Optional[bool] = False,
613
  use_cache: Optional[bool] = False,
614
+ **kwargs,
615
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
616
  """
617
  Args:
 
627
  (see `past_key_values`).
628
  past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
629
  """
630
+ if "padding_mask" in kwargs:
631
+ warnings.warn(
632
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
633
+ "Please make sure to use `attention_mask` instead."
634
+ )
635
+
636
  residual = hidden_states
637
 
638
  hidden_states = self.attention_norm(hidden_states)
 
645
  past_key_value=past_key_value,
646
  output_attentions=output_attentions,
647
  use_cache=use_cache,
648
+ **kwargs,
649
  )
650
  hidden_states = residual + hidden_states
651
 
 
689
  InternLM2_START_DOCSTRING,
690
  )
691
  class InternLM2PreTrainedModel(PreTrainedModel):
 
 
 
 
692
  config_class = InternLM2Config
693
  base_model_prefix = "model"
694
  supports_gradient_checkpointing = True
695
  _no_split_modules = ["InternLM2DecoderLayer"]
696
+ _skip_keys_device_placement = "past_key_values"
 
 
 
 
 
697
 
698
  def _init_weights(self, module):
699
  std = self.config.initializer_range
 
742
  config.n_positions - 1]`.
743
 
744
  [What are position IDs?](../glossary#position-ids)
745
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
746
+ when `config.use_cache=True`):
747
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
748
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
749
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
 
 
 
 
 
750
 
751
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
752
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
753
 
754
  If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
755
  have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
 
769
  more detail.
770
  return_dict (`bool`, *optional*):
771
  Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
 
 
 
 
772
  """
773
 
774
 
775
+ # Modified from transformers.models.llama.modeling_llama.LlamaModel
776
  @add_start_docstrings(
777
  "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
778
  InternLM2_START_DOCSTRING,
 
795
 
796
  self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
797
 
798
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
 
 
799
  self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
800
 
801
  self.gradient_checkpointing = False
 
808
  def set_input_embeddings(self, value):
809
  self.tok_embeddings = value
810
 
811
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
812
+ # create causal mask
813
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
814
+ combined_attention_mask = None
815
+ if input_shape[-1] > 1:
816
+ combined_attention_mask = _make_causal_mask(
817
+ input_shape,
818
+ inputs_embeds.dtype,
819
+ device=inputs_embeds.device,
820
+ past_key_values_length=past_key_values_length,
821
+ )
822
+
823
+ if attention_mask is not None:
824
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
825
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
826
+ inputs_embeds.device
827
+ )
828
+ combined_attention_mask = (
829
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
830
+ )
831
+
832
+ return combined_attention_mask
833
+
834
  @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
835
  def forward(
836
  self,
837
  input_ids: torch.LongTensor = None,
838
  attention_mask: Optional[torch.Tensor] = None,
839
  position_ids: Optional[torch.LongTensor] = None,
840
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
841
  inputs_embeds: Optional[torch.FloatTensor] = None,
842
  use_cache: Optional[bool] = None,
843
  output_attentions: Optional[bool] = None,
844
  output_hidden_states: Optional[bool] = None,
845
  return_dict: Optional[bool] = None,
 
846
  ) -> Union[Tuple, BaseModelOutputWithPast]:
847
  output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
848
  output_hidden_states = (
849
  output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
850
  )
851
  use_cache = use_cache if use_cache is not None else self.config.use_cache
852
+
853
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
854
 
855
+ if self.config.attn_implementation == "flash_attention_2":
856
+ _import_flash_attn()
857
+
858
+ # retrieve input_ids and inputs_embeds
859
+ if input_ids is not None and inputs_embeds is not None:
860
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
861
+ elif input_ids is not None:
862
+ batch_size, seq_length = input_ids.shape[:2]
863
+ elif inputs_embeds is not None:
864
+ batch_size, seq_length = inputs_embeds.shape[:2]
865
+ else:
866
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
867
+
868
+ seq_length_with_past = seq_length
869
+ past_key_values_length = 0
870
+ if past_key_values is not None:
871
+ past_key_values_length = past_key_values[0][0].shape[2]
872
+ seq_length_with_past = seq_length_with_past + past_key_values_length
873
 
874
+ if position_ids is None:
875
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
876
+ position_ids = torch.arange(
877
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
878
  )
879
+ position_ids = position_ids.unsqueeze(0)
880
 
881
  if inputs_embeds is None:
882
  inputs_embeds = self.tok_embeddings(input_ids)
883
 
884
+ if self.config.attn_implementation == "flash_attention_2":
885
+ # 2d mask is passed through the layers
886
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
887
+ else:
888
+ if attention_mask is None:
889
+ attention_mask = torch.ones(
890
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
891
+ )
892
+ attention_mask = self._prepare_decoder_attention_mask(
893
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
894
  )
 
 
 
 
 
 
895
 
896
  # embed positions
897
  hidden_states = inputs_embeds
898
 
899
+ if self.gradient_checkpointing and self.training:
900
+ if use_cache:
901
+ logger.warning_once(
902
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
903
+ )
904
+ use_cache = False
905
+
906
  # decoder layers
907
  all_hidden_states = () if output_hidden_states else None
908
  all_self_attns = () if output_attentions else None
909
+ next_decoder_cache = () if use_cache else None
910
 
911
+ for idx, decoder_layer in enumerate(self.layers):
912
  if output_hidden_states:
913
  all_hidden_states += (hidden_states,)
914
 
915
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
916
+
917
  if self.gradient_checkpointing and self.training:
918
+
919
+ def create_custom_forward(module):
920
+ def custom_forward(*inputs):
921
+ # None for past_key_value
922
+ return module(*inputs, output_attentions, None)
923
+
924
+ return custom_forward
925
+
926
+ layer_outputs = torch.utils.checkpoint.checkpoint(
927
+ create_custom_forward(decoder_layer),
928
  hidden_states,
929
+ attention_mask,
930
  position_ids,
931
+ None,
 
 
 
932
  )
933
  else:
934
  layer_outputs = decoder_layer(
935
  hidden_states,
936
+ attention_mask=attention_mask,
937
  position_ids=position_ids,
938
+ past_key_value=past_key_value,
939
  output_attentions=output_attentions,
940
  use_cache=use_cache,
 
941
  )
942
 
943
  hidden_states = layer_outputs[0]
944
 
945
  if use_cache:
946
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
947
 
948
  if output_attentions:
949
  all_self_attns += (layer_outputs[1],)
 
955
  all_hidden_states += (hidden_states,)
956
 
957
  next_cache = next_decoder_cache if use_cache else None
 
 
 
958
  if not return_dict:
959
  return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
960
  return BaseModelOutputWithPast(
 
964
  attentions=all_self_attns,
965
  )
966
 
967
 
968
+ # Modified from transformers.models.llama.modeling_llama.LlamaForCausalLM
 
969
  class InternLM2ForCausalLM(InternLM2PreTrainedModel):
 
 
970
  _auto_class = "AutoModelForCausalLM"
971
+
972
  _tied_weights_keys = ["output.weight"]
973
 
974
  def __init__(self, config):
 
1005
  input_ids: torch.LongTensor = None,
1006
  attention_mask: Optional[torch.Tensor] = None,
1007
  position_ids: Optional[torch.LongTensor] = None,
1008
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1009
  inputs_embeds: Optional[torch.FloatTensor] = None,
1010
  labels: Optional[torch.LongTensor] = None,
1011
  use_cache: Optional[bool] = None,
1012
  output_attentions: Optional[bool] = None,
1013
  output_hidden_states: Optional[bool] = None,
1014
  return_dict: Optional[bool] = None,
 
1015
  ) -> Union[Tuple, CausalLMOutputWithPast]:
1016
  r"""
1017
  Args:
 
1027
  ```python
1028
  >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1029
 
1030
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1031
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1032
 
1033
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
1034
  >>> inputs = tokenizer(prompt, return_tensors="pt")
 
1056
  output_attentions=output_attentions,
1057
  output_hidden_states=output_hidden_states,
1058
  return_dict=return_dict,
 
1059
  )
1060
 
1061
  hidden_states = outputs[0]
1062
+ logits = self.output(hidden_states)
 
1063
  logits = logits.float()
1064
 
1065
  loss = None
 
1088
  )
1089
 
1090
  def prepare_inputs_for_generation(
1091
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
 
1092
  ):
 
1093
  if past_key_values is not None:
1094
+ past_length = past_key_values[0][0].shape[2]
1095
+
1096
+ # Some generation methods already pass only the last input ID
1097
+ if input_ids.shape[1] > past_length:
1098
+ remove_prefix_length = past_length
 
 
 
 
1099
  else:
1100
+ # Default to old behavior: keep only final ID
1101
+ remove_prefix_length = input_ids.shape[1] - 1
1102
+
1103
+ input_ids = input_ids[:, remove_prefix_length:]
 
1104
 
1105
  position_ids = kwargs.get("position_ids", None)
1106
  if attention_mask is not None and position_ids is None:
 
1114
  if inputs_embeds is not None and past_key_values is None:
1115
  model_inputs = {"inputs_embeds": inputs_embeds}
1116
  else:
1117
+ model_inputs = {"input_ids": input_ids}
 
1118
 
1119
  model_inputs.update(
1120
  {
1121
  "position_ids": position_ids,
 
1122
  "past_key_values": past_key_values,
1123
+ "use_cache": kwargs.get("use_cache"),
1124
  "attention_mask": attention_mask,
1125
  }
1126
  )
 
1135
  )
1136
  return reordered_past
1137
 
1138
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
1139
+ prompt = ""
 
 
 
 
 
1140
  if meta_instruction:
1141
+ prompt += f"""<s>[UNUSED_TOKEN_146]system\n{meta_instruction}[UNUSED_TOKEN_145]\n"""
1142
+ else:
1143
+ prompt += "<s>"
1144
  for record in history:
1145
+ prompt += f"""[UNUSED_TOKEN_146]user\n{record[0]}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n{record[1]}[UNUSED_TOKEN_145]\n"""
1146
+ prompt += f"""[UNUSED_TOKEN_146]user\n{query}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"""
1147
  return tokenizer([prompt], return_tensors="pt")
1148
 
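For reference, a sketch (query, history, and system prompt are made up) of the prompt string that `build_inputs` assembles before tokenization:

```python
meta_instruction = "You are a helpful assistant."     # assumed
history = [("Hi", "Hello! How can I help you?")]      # assumed one-turn history
query = "Introduce yourself."                         # assumed

prompt = (
    f"<s>[UNUSED_TOKEN_146]system\n{meta_instruction}[UNUSED_TOKEN_145]\n"
    f"[UNUSED_TOKEN_146]user\n{history[0][0]}[UNUSED_TOKEN_145]\n"
    f"[UNUSED_TOKEN_146]assistant\n{history[0][1]}[UNUSED_TOKEN_145]\n"
    f"[UNUSED_TOKEN_146]user\n{query}[UNUSED_TOKEN_145]\n"
    f"[UNUSED_TOKEN_146]assistant\n"
)
# The model continues after the final assistant tag, and generation stops at
# [UNUSED_TOKEN_145] (see the eos handling in `chat` below).
```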
1149
  @torch.no_grad()
 
1151
  self,
1152
  tokenizer,
1153
  query: str,
1154
+ history: List[Tuple[str, str]] = [],
1155
  streamer: Optional[BaseStreamer] = None,
1156
  max_new_tokens: int = 1024,
1157
  do_sample: bool = True,
1158
  temperature: float = 0.8,
1159
  top_p: float = 0.8,
1160
  meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
1161
+ "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
1162
+ "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.",
 
 
1163
  **kwargs,
1164
  ):
 
 
1165
  inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1166
  inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1167
# also add the end-of-assistant token to the eos token ids to avoid unnecessary generation
1168
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["[UNUSED_TOKEN_145]"])[0]]
1169
  outputs = self.generate(
1170
  **inputs,
1171
  streamer=streamer,
 
1178
  )
1179
  outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
1180
  response = tokenizer.decode(outputs, skip_special_tokens=True)
1181
+ response = response.split("[UNUSED_TOKEN_145]")[0]
1182
  history = history + [(query, response)]
1183
  return response, history
1184
 
 
1187
  self,
1188
  tokenizer,
1189
  query: str,
1190
+ history: List[Tuple[str, str]] = [],
1191
  max_new_tokens: int = 1024,
1192
  do_sample: bool = True,
1193
  temperature: float = 0.8,
1194
  top_p: float = 0.8,
1195
  **kwargs,
1196
  ):
 
 
1197
  """
1198
  Return a generator in format: (response, history)
1199
  Eg.
 
1209
  response_queue = queue.Queue(maxsize=20)
1210
 
1211
  class ChatStreamer(BaseStreamer):
 
 
 
 
1212
  def __init__(self, tokenizer) -> None:
1213
  super().__init__()
1214
  self.tokenizer = tokenizer
 
1216
  self.query = query
1217
  self.history = history
1218
  self.response = ""
 
1219
  self.received_inputs = False
1220
  self.queue.put((self.response, history + [(self.query, self.response)]))
1221
 
 
1230
  self.received_inputs = True
1231
  return
1232
 
1233
+ token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
1234
+ if token.strip() != "[UNUSED_TOKEN_145]":
 
1235
  self.response = self.response + token
1236
  history = self.history + [(self.query, self.response)]
1237
  self.queue.put((self.response, history))
 
 
 
1238
 
1239
  def end(self):
1240
  self.queue.put(None)
 
1264
  return consumer()
1265
 
1266
 
1267
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1268
  @add_start_docstrings(
1269
  """
1270
  The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1271
 
1272
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1273
+ as other causal models (e.g. GPT-2) do.
1274
 
1275
Since it does classification on the last token, it needs to know the position of the last token. If a
1276
  `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
 
1281
  InternLM2_START_DOCSTRING,
1282
  )
1283
  class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
 
 
1284
  def __init__(self, config):
1285
  super().__init__(config)
1286
  self.num_labels = config.num_labels
 
1302
  input_ids: torch.LongTensor = None,
1303
  attention_mask: Optional[torch.Tensor] = None,
1304
  position_ids: Optional[torch.LongTensor] = None,
1305
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1306
  inputs_embeds: Optional[torch.FloatTensor] = None,
1307
  labels: Optional[torch.LongTensor] = None,
1308
  use_cache: Optional[bool] = None,
 
1343
  sequence_lengths = -1
1344
  else:
1345
  if input_ids is not None:
1346
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
1347
+ logits.device
1348
+ )
 
1349
  else:
1350
  sequence_lengths = -1
1351
 
 
1357
  if self.config.problem_type is None:
1358
  if self.num_labels == 1:
1359
  self.config.problem_type = "regression"
1360
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1361
  self.config.problem_type = "single_label_classification"
1362
  else:
1363
  self.config.problem_type = "multi_label_classification"
 
1385
  hidden_states=transformer_outputs.hidden_states,
1386
  attentions=transformer_outputs.attentions,
1387
  )
 
model-00004-of-00008.safetensors → pytorch_model-00001-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2dd631ca87a5e516780832e747156e304a3c9bb53f85c4304550bff970d2db26
3
- size 1946242728
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdd03c1ea040b17ce992eb303d0824f5694932902d0c3fa57b5a8d1df2dc8082
3
+ size 1949342053
model-00001-of-00008.safetensors → pytorch_model-00002-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9cec9c25c5f54c545deb16123fc72a0b265fd6bed4c467b4519380a8b215b62b
3
- size 1949337704
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:377d66327bcc2dc4b3e5f4e90f24b428c55832ece07194990248647ea65c2db5
3
+ size 1946249825
model-00002-of-00008.safetensors → pytorch_model-00003-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4735b4b15a670c33a6aefe70cf39e4ba08468a496167b5233fa43ebcfbd42895
3
- size 1946242696
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98aa01b7ffd7901699b4588b5e6b8f753e66c1874f8b855c36b23de2c95ae9d8
3
+ size 1979786923
model-00003-of-00008.safetensors → pytorch_model-00004-of-00008.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:523c7292140bc4c37d5fbd517ec7f9d0aa27b7d22822fdfc730f94f3bf02ebac
3
- size 1979780440
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9362abdb2afd077e579a7860ad570769255c2d64af2b0831413578c77a44d779
3
+ size 1946249889
pytorch_model-00005-of-00008.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56761cf09eb95e676bce5c1dc53fd4f8c317775d146fe2313dcb7b81c48cfa39
3
+ size 1979786987
pytorch_model-00006-of-00008.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a825cc1e85ffc038efaed5a06ae398f424916bdd768bcf62d7fb4bc552d71ef
3
+ size 1946249889
pytorch_model-00007-of-00008.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:224105eee8f19409994aea890cc6cc9da16ef4278ad831fdd4e9fcf86f925256
3
+ size 1979786987
pytorch_model-00008-of-00008.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ed276d23e8a4ab9ec5bafe491cb7147b4b51232a66ce0b9f304bc8a5430d82f
3
+ size 1748039973
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8a1efb6998624330a0564f9bba63eb8ccae0ad54a6d0176c64f2eb30721f2b5
3
+ size 18179
special_tokens_map.json CHANGED
@@ -1,38 +1,6 @@
1
  {
2
- "additional_special_tokens": [
3
- "<|im_start|>",
4
- "<|im_end|>",
5
- "<|action_start|>",
6
- "<|action_end|>",
7
- "<|interpreter|>",
8
- "<|plugin|>"
9
- ],
10
- "bos_token": {
11
- "content": "<s>",
12
- "lstrip": false,
13
- "normalized": false,
14
- "rstrip": false,
15
- "single_word": false
16
- },
17
- "eos_token": {
18
- "content": "</s>",
19
- "lstrip": false,
20
- "normalized": false,
21
- "rstrip": false,
22
- "single_word": false
23
- },
24
- "pad_token": {
25
- "content": "</s>",
26
- "lstrip": false,
27
- "normalized": false,
28
- "rstrip": false,
29
- "single_word": false
30
- },
31
- "unk_token": {
32
- "content": "<unk>",
33
- "lstrip": false,
34
- "normalized": false,
35
- "rstrip": false,
36
- "single_word": false
37
- }
38
  }
 
1
  {
2
+ "bos_token": "<s>",
3
+ "eos_token": "</s>",
4
+ "pad_token": "</s>",
5
+ "unk_token": "<unk>"
 
6
  }
tokenization_internlm2.py → tokenization_internlm.py RENAMED
@@ -1,7 +1,10 @@
1
  # coding=utf-8
2
- # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
3
  #
4
- # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
 
 
 
5
  #
6
  # Licensed under the Apache License, Version 2.0 (the "License");
7
  # you may not use this file except in compliance with the License.
@@ -15,7 +18,7 @@
15
  # See the License for the specific language governing permissions and
16
  # limitations under the License.
17
 
18
- """Tokenization classes for InternLM."""
19
  import os
20
  from shutil import copyfile
21
  from typing import Any, Dict, List, Optional, Tuple
@@ -31,10 +34,9 @@ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
31
  PRETRAINED_VOCAB_FILES_MAP = {}
32
 
33
 
34
- # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
35
- class InternLM2Tokenizer(PreTrainedTokenizer):
36
  """
37
- Construct a InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
38
 
39
  Args:
40
  vocab_file (`str`):
@@ -77,6 +79,8 @@ class InternLM2Tokenizer(PreTrainedTokenizer):
77
  **kwargs,
78
  )
79
 
 
 
80
  @property
81
  def no_prefix_space_tokens(self):
82
  if self._no_prefix_space_tokens is None:
 
1
  # coding=utf-8
2
+ # Copyright (c) InternLM. All rights reserved.
3
  #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
  #
9
  # Licensed under the Apache License, Version 2.0 (the "License");
10
  # you may not use this file except in compliance with the License.
 
18
  # See the License for the specific language governing permissions and
19
  # limitations under the License.
20
 
21
+ """Tokenization classes for InternLM."""
22
  import os
23
  from shutil import copyfile
24
  from typing import Any, Dict, List, Optional, Tuple
 
34
  PRETRAINED_VOCAB_FILES_MAP = {}
35
 
36
 
37
+ class InternLMTokenizer(PreTrainedTokenizer):
 
38
  """
39
+ Construct an InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
40
 
41
  Args:
42
  vocab_file (`str`):
 
79
  **kwargs,
80
  )
81
 
82
+ """ Initialization"""
83
+
84
  @property
85
  def no_prefix_space_tokens(self):
86
  if self._no_prefix_space_tokens is None:
tokenization_internlm2_fast.py DELETED
@@ -1,214 +0,0 @@
1
- # coding=utf-8
2
- # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License.
17
-
18
- """Tokenization Fast class for InternLM."""
19
- import os
20
- from shutil import copyfile
21
- from typing import Any, Dict, Optional, Tuple
22
-
23
- from tokenizers import processors, decoders, Tokenizer, normalizers
24
- from tokenizers.models import BPE
25
-
26
- from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
27
- from transformers.utils import logging
28
-
29
- from transformers.convert_slow_tokenizer import (
30
- SLOW_TO_FAST_CONVERTERS,
31
- SpmConverter,
32
- SentencePieceExtractor,
33
- )
34
-
35
- from .tokenization_internlm2 import InternLM2Tokenizer
36
-
37
- logger = logging.get_logger(__name__)
38
-
39
- VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
40
-
41
- # Modified from transformers.convert_slow_tokenizer.LlamaConverter
42
- class InternLM2Converter(SpmConverter):
43
- handle_byte_fallback = True
44
-
45
- def vocab(self, proto):
46
- vocab = [
47
- ("<unk>", 0.0),
48
- ("<s>", 0.0),
49
- ("</s>", 0.0),
50
- ]
51
- vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
52
- return vocab
53
-
54
- def unk_id(self, proto):
55
- unk_id = 0
56
- return unk_id
57
-
58
- def decoder(self, replacement, add_prefix_space):
59
- decoders_sequence = [
60
- decoders.Replace("▁", " "),
61
- decoders.ByteFallback(),
62
- decoders.Fuse(),
63
- ]
64
- if self.proto.normalizer_spec.add_dummy_prefix:
65
- decoders_sequence.append(decoders.Strip(content=" ", left=1))
66
- return decoders.Sequence(decoders_sequence)
67
-
68
- def tokenizer(self, proto):
69
- model_type = proto.trainer_spec.model_type
70
- vocab_scores = self.vocab(proto)
71
- # special tokens
72
- added_tokens = self.original_tokenizer.added_tokens_decoder
73
- for i in range(len(vocab_scores)):
74
- piece, score = vocab_scores[i]
75
- if i in added_tokens:
76
- vocab_scores[i] = (added_tokens[i].content, score)
77
- if model_type == 1:
78
- raise RuntimeError("InternLM2 is supposed to be a BPE model!")
79
-
80
- elif model_type == 2:
81
- _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
82
- bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
83
- tokenizer = Tokenizer(
84
- BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
85
- )
86
- tokenizer.add_special_tokens(
87
- [ added_token for index, added_token in added_tokens.items()]
88
- )
89
- else:
90
- raise Exception(
91
- "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
92
- )
93
-
94
- return tokenizer
95
-
96
- def normalizer(self, proto):
97
- normalizers_list = []
98
- if proto.normalizer_spec.add_dummy_prefix:
99
- normalizers_list.append(normalizers.Prepend(prepend="▁"))
100
- normalizers_list.append(normalizers.Replace(pattern=" ", content="▁"))
101
- return normalizers.Sequence(normalizers_list)
102
-
103
- def pre_tokenizer(self, replacement, add_prefix_space):
104
- return None
105
-
106
- SLOW_TO_FAST_CONVERTERS["InternLM2Tokenizer"] = InternLM2Converter
107
-
108
-
109
- # Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast
110
- class InternLM2TokenizerFast(PreTrainedTokenizerFast):
111
- vocab_files_names = VOCAB_FILES_NAMES
112
- slow_tokenizer_class = InternLM2Tokenizer
113
- padding_side = "left"
114
- model_input_names = ["input_ids", "attention_mask"]
115
- _auto_class = "AutoTokenizer"
116
-
117
- def __init__(
118
- self,
119
- vocab_file,
120
- unk_token="<unk>",
121
- bos_token="<s>",
122
- eos_token="</s>",
123
- pad_token="</s>",
124
- sp_model_kwargs: Optional[Dict[str, Any]] = None,
125
- add_bos_token=True,
126
- add_eos_token=False,
127
- decode_with_prefix_space=False,
128
- clean_up_tokenization_spaces=False,
129
- **kwargs,
130
- ):
131
- super().__init__(
132
- vocab_file=vocab_file,
133
- unk_token=unk_token,
134
- bos_token=bos_token,
135
- eos_token=eos_token,
136
- pad_token=pad_token,
137
- sp_model_kwargs=sp_model_kwargs,
138
- add_bos_token=add_bos_token,
139
- add_eos_token=add_eos_token,
140
- decode_with_prefix_space=decode_with_prefix_space,
141
- clean_up_tokenization_spaces=clean_up_tokenization_spaces,
142
- **kwargs,
143
- )
144
- self._add_bos_token = add_bos_token
145
- self._add_eos_token = add_eos_token
146
- self.update_post_processor()
147
- self.vocab_file = vocab_file
148
-
149
- @property
150
- def can_save_slow_tokenizer(self) -> bool:
151
- return os.path.isfile(self.vocab_file) if self.vocab_file else False
152
-
153
- def update_post_processor(self):
154
- """
155
- Updates the underlying post processor with the current `bos_token` and `eos_token`.
156
- """
157
- bos = self.bos_token
158
- bos_token_id = self.bos_token_id
159
- if bos is None and self.add_bos_token:
160
- raise ValueError("add_bos_token = True but bos_token = None")
161
-
162
- eos = self.eos_token
163
- eos_token_id = self.eos_token_id
164
- if eos is None and self.add_eos_token:
165
- raise ValueError("add_eos_token = True but eos_token = None")
166
-
167
- single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
168
- pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
169
-
170
- special_tokens = []
171
- if self.add_bos_token:
172
- special_tokens.append((bos, bos_token_id))
173
- if self.add_eos_token:
174
- special_tokens.append((eos, eos_token_id))
175
- self._tokenizer.post_processor = processors.TemplateProcessing(
176
- single=single, pair=pair, special_tokens=special_tokens
177
- )
178
-
179
- @property
180
- def add_eos_token(self):
181
- return self._add_eos_token
182
-
183
- @property
184
- def add_bos_token(self):
185
- return self._add_bos_token
186
-
187
- @add_eos_token.setter
188
- def add_eos_token(self, value):
189
- self._add_eos_token = value
190
- self.update_post_processor()
191
-
192
- @add_bos_token.setter
193
- def add_bos_token(self, value):
194
- self._add_bos_token = value
195
- self.update_post_processor()
196
-
197
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
198
- if not self.can_save_slow_tokenizer:
199
- raise ValueError(
200
- "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
201
- "tokenizer."
202
- )
203
-
204
- if not os.path.isdir(save_directory):
205
- logger.error(f"Vocabulary path ({save_directory}) should be a directory")
206
- return
207
- out_vocab_file = os.path.join(
208
- save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
209
- )
210
-
211
- if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
212
- copyfile(self.vocab_file, out_vocab_file)
213
-
214
- return (out_vocab_file,)
 
tokenizer_config.json CHANGED
@@ -1,102 +1,15 @@
1
  {
2
- "add_bos_token": true,
3
- "add_eos_token": false,
4
- "added_tokens_decoder": {
5
- "0": {
6
- "content": "<unk>",
7
- "lstrip": false,
8
- "normalized": false,
9
- "rstrip": false,
10
- "single_word": false,
11
- "special": true
12
- },
13
- "1": {
14
- "content": "<s>",
15
- "lstrip": false,
16
- "normalized": false,
17
- "rstrip": false,
18
- "single_word": false,
19
- "special": true
20
- },
21
- "2": {
22
- "content": "</s>",
23
- "lstrip": false,
24
- "normalized": false,
25
- "rstrip": false,
26
- "single_word": false,
27
- "special": true
28
- },
29
- "92538": {
30
- "content": "<|plugin|>",
31
- "lstrip": false,
32
- "normalized": false,
33
- "rstrip": false,
34
- "single_word": false,
35
- "special": true
36
- },
37
- "92539": {
38
- "content": "<|interpreter|>",
39
- "lstrip": false,
40
- "normalized": false,
41
- "rstrip": false,
42
- "single_word": false,
43
- "special": true
44
- },
45
- "92540": {
46
- "content": "<|action_end|>",
47
- "lstrip": false,
48
- "normalized": false,
49
- "rstrip": false,
50
- "single_word": false,
51
- "special": true
52
- },
53
- "92541": {
54
- "content": "<|action_start|>",
55
- "lstrip": false,
56
- "normalized": false,
57
- "rstrip": false,
58
- "single_word": false,
59
- "special": true
60
- },
61
- "92542": {
62
- "content": "<|im_end|>",
63
- "lstrip": false,
64
- "normalized": false,
65
- "rstrip": false,
66
- "single_word": false,
67
- "special": true
68
- },
69
- "92543": {
70
- "content": "<|im_start|>",
71
- "lstrip": false,
72
- "normalized": false,
73
- "rstrip": false,
74
- "single_word": false,
75
- "special": true
76
- }
77
- },
78
- "additional_special_tokens": [
79
- "<|im_start|>",
80
- "<|im_end|>",
81
- "<|action_start|>",
82
- "<|action_end|>",
83
- "<|interpreter|>",
84
- "<|plugin|>"
85
- ],
86
  "auto_map": {
87
  "AutoTokenizer": [
88
- "tokenization_internlm2.InternLM2Tokenizer",
89
- "tokenization_internlm2_fast.InternLM2TokenizerFast"
90
  ]
91
  },
92
  "bos_token": "<s>",
93
- "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
94
  "clean_up_tokenization_spaces": false,
95
- "decode_with_prefix_space": false,
96
  "eos_token": "</s>",
97
  "model_max_length": 1000000000000000019884624838656,
98
  "pad_token": "</s>",
99
- "sp_model_kwargs": null,
100
- "tokenizer_class": "InternLM2Tokenizer",
101
  "unk_token": "<unk>"
102
  }
 
1
  {
 
 
2
  "auto_map": {
3
  "AutoTokenizer": [
4
+ "tokenization_internlm.InternLMTokenizer",
5
+ null
6
  ]
7
  },
8
  "bos_token": "<s>",
 
9
  "clean_up_tokenization_spaces": false,
 
10
  "eos_token": "</s>",
11
  "model_max_length": 1000000000000000019884624838656,
12
  "pad_token": "</s>",
13
+ "tokenizer_class": "InternLMTokenizer",
 
14
  "unk_token": "<unk>"
15
  }