InternLM-Math committed on
Commit 6e811ae
1 Parent(s): c07997d

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "architectures": [
+     "InternLM2ForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_internlm.InternLMConfig",
+     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
+     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM"
+   },
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 8192,
+   "model_type": "internlm",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 1.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.35.2",
+   "use_cache": true,
+   "vocab_size": 92544
+ }
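Note: because `auto_map` points at the custom `configuration_internlm.py` / `modeling_internlm2.py` files added in this commit, loading the checkpoint through `transformers` requires `trust_remote_code=True`. A minimal loading sketch; the checkpoint path below is a placeholder, not a confirmed repo id:

```python
# Minimal sketch, assuming the files in this commit are available at "path/to/checkpoint".
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "path/to/checkpoint",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    trust_remote_code=True,      # lets auto_map resolve the custom InternLM2 classes
)
```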
configuration_internlm.py ADDED
@@ -0,0 +1,159 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class InternLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of InternLM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented
+             by the `inputs_ids` passed when calling [`InternLMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by mean-pooling all the original heads within that group. For more details, check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something
+             large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+     Example:
+
+     ```python
+     >>> from transformers import InternLMModel, InternLMConfig
+
+     >>> # Initializing an InternLM internlm-7b style configuration
+     >>> configuration = InternLMConfig()
+
+     >>> # Initializing a model from the internlm-7b style configuration
+     >>> model = InternLMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "internlm"
+     _auto_class = "AutoConfig"
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
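The `_rope_scaling_validation` helper above only accepts a two-field dict whose `type` is `"linear"` or `"dynamic"` and whose `factor` is a float >= 1. A small sketch of what passes and what fails, using the same values as this repo's config.json (assumes it is run from a directory containing `configuration_internlm.py`):

```python
# Sketch only: exercises _rope_scaling_validation as defined above.
from configuration_internlm import InternLMConfig

cfg = InternLMConfig(rope_scaling={"type": "dynamic", "factor": 1.0})  # passes: known type, float >= 1
print(cfg.rope_scaling)

try:
    InternLMConfig(rope_scaling={"type": "dynamic", "factor": 0.5})  # fails: factor < 1.0
except ValueError as err:
    print(err)
```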
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.35.2"
+ }
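These generation defaults (`bos_token_id=1`, `eos_token_id=2`, `pad_token_id=2`) are picked up automatically by `model.generate()`, and can also be inspected directly. A sketch, with the same placeholder path as above:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/checkpoint")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 1 2 2
```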
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:805c5cb26f13ef9372d1b5cd5eed7c874626bc855bd8681d708d403f5e690ac9
+ size 1949337704
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:000c206e25036da7f7fb575a784f523837b14bffaf4baeed6e73be16c61a33c1
+ size 1946242696
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1f07197d4a1e68282b703f8c082cfdb49a10bc64552c79993f112da2002bb00
+ size 1979780440
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50c67c286659e0620bc1ad1a63715ac8e5792acb68f9a6bd3151f9e567db9013
+ size 1946242728
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd428388ca2dee0f5dfa37b7195c3bc19dc147076cf2329b258e2c3d7ac3fdee
+ size 1979780456
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f7080f96552a49b6dca1a1cd68dbbaf15d722d8ad5474b8fe14a0caef86ec92
+ size 1946242728
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7aaff62b19c84b49fe6236425a2aaaa4856b586766cb8ddacf7aef2ac949c57a
+ size 1979780456
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b5aa160d13657d686c74c063030de0bde6999c3582c6993397dae8973d917b6
+ size 1748035640
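The eight files above are Git LFS pointer stubs (spec version, SHA-256 oid, byte size); the tensor data itself lives in LFS storage. Once a shard has been downloaded, its contents can be inspected with the `safetensors` library. A sketch:

```python
# Sketch: list a few tensors from one downloaded shard without loading the whole model.
from safetensors import safe_open

with safe_open("model-00001-of-00008.safetensors", framework="pt", device="cpu") as f:
    for name in list(f.keys())[:5]:
        print(name, tuple(f.get_tensor(name).shape))
```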
model.safetensors.index.json ADDED
@@ -0,0 +1,234 @@
+ {
+   "metadata": {
+     "total_size": 15475417088
+   },
+   "weight_map": {
+     "model.layers.0.attention.wo.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.attention.wqkv.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.attention_norm.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
+     "model.layers.0.ffn_norm.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.attention.wo.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.attention.wqkv.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.attention_norm.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
+     "model.layers.1.ffn_norm.weight": "model-00001-of-00008.safetensors",
+     "model.layers.10.attention.wo.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.attention.wqkv.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.attention_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
+     "model.layers.10.ffn_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.11.attention.wo.weight": "model-00003-of-00008.safetensors",
+     "model.layers.11.attention.wqkv.weight": "model-00003-of-00008.safetensors",
+     "model.layers.11.attention_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.11.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
+     "model.layers.11.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
+     "model.layers.11.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
+     "model.layers.11.ffn_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.attention.wo.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.attention.wqkv.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.attention_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
+     "model.layers.12.ffn_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.attention.wo.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.attention.wqkv.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.attention_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
+     "model.layers.13.ffn_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.attention.wo.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.attention.wqkv.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.attention_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
+     "model.layers.14.ffn_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.attention.wo.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.attention.wqkv.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.attention_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
+     "model.layers.15.ffn_norm.weight": "model-00004-of-00008.safetensors",
+     "model.layers.16.attention.wo.weight": "model-00004-of-00008.safetensors",
+     "model.layers.16.attention.wqkv.weight": "model-00004-of-00008.safetensors",
+     "model.layers.16.attention_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.16.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
+     "model.layers.16.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
+     "model.layers.16.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
+     "model.layers.16.ffn_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.attention.wo.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.attention.wqkv.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.attention_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
+     "model.layers.17.ffn_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.attention.wo.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.attention.wqkv.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.attention_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
+     "model.layers.18.ffn_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.attention.wo.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.attention.wqkv.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.attention_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
+     "model.layers.19.ffn_norm.weight": "model-00005-of-00008.safetensors",
+     "model.layers.2.attention.wo.weight": "model-00001-of-00008.safetensors",
+     "model.layers.2.attention.wqkv.weight": "model-00001-of-00008.safetensors",
+     "model.layers.2.attention_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.2.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
+     "model.layers.2.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
+     "model.layers.2.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
+     "model.layers.2.ffn_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.20.attention.wo.weight": "model-00005-of-00008.safetensors",
+     "model.layers.20.attention.wqkv.weight": "model-00005-of-00008.safetensors",
+     "model.layers.20.attention_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.20.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
+     "model.layers.20.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
+     "model.layers.20.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
+     "model.layers.20.ffn_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.attention.wo.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.attention.wqkv.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.attention_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
+     "model.layers.21.ffn_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.attention.wo.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.attention.wqkv.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.attention_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
+     "model.layers.22.ffn_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.attention.wo.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.attention.wqkv.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.attention_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
+     "model.layers.23.ffn_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.attention.wo.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.attention.wqkv.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.attention_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
+     "model.layers.24.ffn_norm.weight": "model-00006-of-00008.safetensors",
+     "model.layers.25.attention.wo.weight": "model-00006-of-00008.safetensors",
+     "model.layers.25.attention.wqkv.weight": "model-00006-of-00008.safetensors",
+     "model.layers.25.attention_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.25.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
+     "model.layers.25.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
+     "model.layers.25.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
+     "model.layers.25.ffn_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.attention.wo.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.attention.wqkv.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.attention_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
+     "model.layers.26.ffn_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.attention.wo.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.attention.wqkv.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.attention_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
+     "model.layers.27.ffn_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.attention.wo.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.attention.wqkv.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.attention_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
+     "model.layers.28.ffn_norm.weight": "model-00007-of-00008.safetensors",
+     "model.layers.29.attention.wo.weight": "model-00007-of-00008.safetensors",
+     "model.layers.29.attention.wqkv.weight": "model-00007-of-00008.safetensors",
+     "model.layers.29.attention_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.29.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
+     "model.layers.29.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
+     "model.layers.29.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
+     "model.layers.29.ffn_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.3.attention.wo.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.attention.wqkv.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.attention_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
+     "model.layers.3.ffn_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.30.attention.wo.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.attention.wqkv.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.attention_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
+     "model.layers.30.ffn_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.attention.wo.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.attention.wqkv.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.attention_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
+     "model.layers.31.ffn_norm.weight": "model-00008-of-00008.safetensors",
+     "model.layers.4.attention.wo.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.attention.wqkv.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.attention_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
+     "model.layers.4.ffn_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.attention.wo.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.attention.wqkv.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.attention_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
+     "model.layers.5.ffn_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.attention.wo.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.attention.wqkv.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.attention_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
+     "model.layers.6.ffn_norm.weight": "model-00002-of-00008.safetensors",
+     "model.layers.7.attention.wo.weight": "model-00002-of-00008.safetensors",
+     "model.layers.7.attention.wqkv.weight": "model-00002-of-00008.safetensors",
+     "model.layers.7.attention_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.7.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
+     "model.layers.7.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
+     "model.layers.7.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
+     "model.layers.7.ffn_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.attention.wo.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.attention.wqkv.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.attention_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
+     "model.layers.8.ffn_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.attention.wo.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.attention.wqkv.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.attention_norm.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
+     "model.layers.9.ffn_norm.weight": "model-00003-of-00008.safetensors",
+     "model.norm.weight": "model-00008-of-00008.safetensors",
+     "model.tok_embeddings.weight": "model-00001-of-00008.safetensors",
+     "output.weight": "model-00008-of-00008.safetensors"
+   }
+ }
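The `weight_map` above is what `from_pretrained` consults to decide which shard to read for each parameter, and `metadata.total_size` is the combined checkpoint size in bytes. A sketch of querying the index directly:

```python
# Sketch: resolve a parameter to its shard file using the index above.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["weight_map"]["model.layers.11.feed_forward.w2.weight"])  # model-00004-of-00008.safetensors
print(f'{index["metadata"]["total_size"] / 1024**3:.1f} GiB total')   # ~14.4 GiB
```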
modeling_internlm2.py ADDED
@@ -0,0 +1,1265 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ PyTorch InternLM2 model."""
+ import math
+ import queue
+ import threading
+ import warnings
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ import torch.utils.checkpoint
+ from einops import rearrange
+ from torch import nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     SequenceClassifierOutputWithPast,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import (
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging,
+     replace_return_docstrings,
+ )
+
+ try:
+     from transformers.generation.streamers import BaseStreamer
+ except:  # noqa # pylint: disable=bare-except
+     BaseStreamer = None
+
+ from .configuration_internlm import InternLMConfig as InternLM2Config
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "InternLM2Config"
+
+
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
+ def _make_causal_mask(
+     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+ ):
+     """
+     Make causal mask used for bi-directional self-attention.
+     """
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+     """
+     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+     """
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+     inverted_mask = 1.0 - expanded_mask
+
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ class InternLM2RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         InternLM2RMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ class InternLM2RotaryEmbedding(nn.Module):
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+         super().__init__()
+
+         self.dim = dim
+         self.max_position_embeddings = max_position_embeddings
+         self.base = base
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         # Build here to make `torch.jit.trace` work.
+         self._set_cos_sin_cache(
+             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+         )
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         if seq_len > self.max_seq_len_cached:
+             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+         return (
+             self.cos_cached[:seq_len].to(dtype=x.dtype),
+             self.sin_cached[:seq_len].to(dtype=x.dtype),
+         )
+
+
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
+     """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+         t = t / self.scaling_factor
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
+     """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
+     Credits to the Reddit users /u/bloc97 and /u/emozilla.
+     """
+
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         if seq_len > self.max_position_embeddings:
+             base = self.base * (
+                 (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+             ) ** (self.dim / (self.dim - 2))
+             inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+             self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
+     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
+     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
+     cos = cos.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
+     sin = sin.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
+     if q.size(2) == 1:
+         q_embed = (q * cos[:, :, -1, :]) + (rotate_half(q) * sin[:, :, -1, :])
+     else:
+         q_embed = (q * cos) + (rotate_half(q) * sin)
+
+     if k.size(2) == 1:
+         k_embed = (k * cos[:, :, -1, :]) + (rotate_half(k) * sin[:, :, -1, :])
+     else:
+         k_embed = (k * cos) + (rotate_half(k) * sin)
+
+     return q_embed, k_embed
+
+
+ class InternLM2MLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
+
+         return down_proj
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ class InternLM2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: InternLM2Config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.num_key_value_heads = config.num_key_value_heads
+         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+         self.max_position_embeddings = config.max_position_embeddings
+         self.is_causal = True
+
+         if (self.head_dim * self.num_heads) != self.hidden_size:
+             raise ValueError(
+                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                 f" and `num_heads`: {self.num_heads})."
+             )
+
+         self.wqkv = nn.Linear(
+             self.hidden_size,
+             (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
+             bias=config.bias,
+         )
+
+         self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
+         self._init_rope()
+
+     def _init_rope(self):
+         if self.config.rope_scaling is None:
+             self.rotary_emb = InternLM2RotaryEmbedding(
+                 self.head_dim,
+                 max_position_embeddings=self.max_position_embeddings,
+                 base=self.config.rope_theta,
+             )
+         else:
+             scaling_type = self.config.rope_scaling["type"]
+             scaling_factor = self.config.rope_scaling["factor"]
+             if scaling_type == "dynamic":
+                 self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
+                     self.head_dim,
+                     max_position_embeddings=self.max_position_embeddings,
+                     base=self.config.rope_theta,
+                     scaling_factor=scaling_factor,
+                 )
+             else:
+                 raise ValueError("Currently we only support rotary embedding's type being 'dynamic'.")
+         return self.rotary_emb
+
+     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         if "padding_mask" in kwargs:
+             warnings.warn(
+                 "Passing `padding_mask` is deprecated and will be removed in v4.37. "
+                 "Please make sure to use `attention_mask` instead."
+             )
+
+         bsz, q_len, _ = hidden_states.size()
+
+         qkv_states = self.wqkv(hidden_states)
+
+         qkv_states = rearrange(
+             qkv_states,
+             "b q (h gs d) -> b q h gs d",
+             gs=2 + self.num_key_value_groups,
+             d=self.head_dim,
+         )
+
+         query_states = qkv_states[..., : self.num_key_value_groups, :]
+         query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
+         key_states = qkv_states[..., -2, :]
+         value_states = qkv_states[..., -1, :]
+
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+
+         kv_seq_len = key_states.shape[-2]
+         if past_key_value is not None:
+             kv_seq_len += past_key_value[0].shape[-2]
+         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+         if past_key_value is not None:
+             # reuse k, v, self_attention
+             key_states = torch.cat([past_key_value[0], key_states], dim=2)
+             value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+         past_key_value = (key_states, value_states) if use_cache else None
+
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+         if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+             raise ValueError(
+                 f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                 f" {attn_weights.size()}"
+             )
+
+         if attention_mask is not None:
+             if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                 raise ValueError(
+                     f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                 )
+             attn_weights = attn_weights + attention_mask
+
+         # upcast attention to fp32
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+         attn_output = torch.matmul(attn_weights, value_states)
+
+         if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+             raise ValueError(
+                 f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                 f" {attn_output.size()}"
+             )
+
+         attn_output = attn_output.transpose(1, 2).contiguous()
+         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+         attn_output = self.wo(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+
+ class InternLM2FlashAttention2(InternLM2Attention):
+     """
+     InternLM2 flash attention module. This module inherits from `InternLM2Attention`, as the weights of the module
+     stay untouched. The only required change is in the forward pass, where it needs to correctly call the public API
+     of flash attention and deal with padding tokens in case the input contains any of them.
+     """
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+