Yuliang Fang committed
Commit 5ca8ac6
Parent: ab2e6a5

add models

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. model_repository/postprocessing/1/__pycache__/model.cpython-310.pyc +0 -0
  2. model_repository/postprocessing/1/model.py +129 -0
  3. model_repository/postprocessing/1/tokenizer/config.json +37 -0
  4. model_repository/postprocessing/1/tokenizer/configuration_internlm.py +164 -0
  5. model_repository/postprocessing/1/tokenizer/generation_config.json +7 -0
  6. model_repository/postprocessing/1/tokenizer/modeling_internlm2.py +1385 -0
  7. model_repository/postprocessing/1/tokenizer/placeholder +0 -0
  8. model_repository/postprocessing/1/tokenizer/pytorch_model.bin.index.json +554 -0
  9. model_repository/postprocessing/1/tokenizer/special_tokens_map.json +30 -0
  10. model_repository/postprocessing/1/tokenizer/tokenization_internlm.py +240 -0
  11. model_repository/postprocessing/1/tokenizer/tokenizer.model +3 -0
  12. model_repository/postprocessing/1/tokenizer/tokenizer.py +400 -0
  13. model_repository/postprocessing/1/tokenizer/tokenizer_config.json +90 -0
  14. model_repository/postprocessing/config.pbtxt +36 -0
  15. model_repository/preprocessing/1/__pycache__/model.cpython-310.pyc +0 -0
  16. model_repository/preprocessing/1/model.py +151 -0
  17. model_repository/preprocessing/1/tokenizer/config.json +37 -0
  18. model_repository/preprocessing/1/tokenizer/configuration_internlm.py +164 -0
  19. model_repository/preprocessing/1/tokenizer/generation_config.json +7 -0
  20. model_repository/preprocessing/1/tokenizer/modeling_internlm2.py +1385 -0
  21. model_repository/preprocessing/1/tokenizer/placeholder +0 -0
  22. model_repository/preprocessing/1/tokenizer/pytorch_model.bin.index.json +554 -0
  23. model_repository/preprocessing/1/tokenizer/special_tokens_map.json +30 -0
  24. model_repository/preprocessing/1/tokenizer/tokenization_internlm.py +240 -0
  25. model_repository/preprocessing/1/tokenizer/tokenizer.model +3 -0
  26. model_repository/preprocessing/1/tokenizer/tokenizer.py +400 -0
  27. model_repository/preprocessing/1/tokenizer/tokenizer_config.json +90 -0
  28. model_repository/preprocessing/config.pbtxt +37 -0
  29. model_repository/turbomind/1/placeholder +0 -0
  30. model_repository/turbomind/1/weights/config.ini +3 -0
  31. model_repository/turbomind/1/weights/layers.0.attention.w_qkv.0.qweight +3 -0
  32. model_repository/turbomind/1/weights/layers.0.attention.w_qkv.0.scales_zeros +3 -0
  33. model_repository/turbomind/1/weights/layers.0.attention.wo.0.qweight +3 -0
  34. model_repository/turbomind/1/weights/layers.0.attention.wo.0.scales_zeros +3 -0
  35. model_repository/turbomind/1/weights/layers.0.attention_norm.weight +3 -0
  36. model_repository/turbomind/1/weights/layers.0.feed_forward.w13.0.qweight +3 -0
  37. model_repository/turbomind/1/weights/layers.0.feed_forward.w13.0.scales_zeros +3 -0
  38. model_repository/turbomind/1/weights/layers.0.feed_forward.w2.0.qweight +3 -0
  39. model_repository/turbomind/1/weights/layers.0.feed_forward.w2.0.scales_zeros +3 -0
  40. model_repository/turbomind/1/weights/layers.0.ffn_norm.weight +3 -0
  41. model_repository/turbomind/1/weights/layers.0.past_kv_scale.0.weight +3 -0
  42. model_repository/turbomind/1/weights/layers.1.attention.w_qkv.0.qweight +3 -0
  43. model_repository/turbomind/1/weights/layers.1.attention.w_qkv.0.scales_zeros +3 -0
  44. model_repository/turbomind/1/weights/layers.1.attention.wo.0.qweight +3 -0
  45. model_repository/turbomind/1/weights/layers.1.attention.wo.0.scales_zeros +3 -0
  46. model_repository/turbomind/1/weights/layers.1.attention_norm.weight +3 -0
  47. model_repository/turbomind/1/weights/layers.1.feed_forward.w13.0.qweight +3 -0
  48. model_repository/turbomind/1/weights/layers.1.feed_forward.w13.0.scales_zeros +3 -0
  49. model_repository/turbomind/1/weights/layers.1.feed_forward.w2.0.qweight +3 -0
  50. model_repository/turbomind/1/weights/layers.1.feed_forward.w2.0.scales_zeros +3 -0
model_repository/postprocessing/1/__pycache__/model.cpython-310.pyc ADDED
Binary file (4.14 kB).
 
model_repository/postprocessing/1/model.py ADDED
@@ -0,0 +1,129 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os.path as osp
+from pathlib import Path
+
+import numpy as np
+import triton_python_backend_utils as pb_utils
+
+# This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served
+# by triton inference server, it has to be converted first by running
+# `python lmdeploy/serve/turbomind/deploy.py`. Then
+# `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py`
+from .tokenizer.tokenizer import Tokenizer
+
+
+class TritonPythonModel:
+    """Your Python model must use the same class name.
+
+    Every Python model that is created must have "TritonPythonModel" as the
+    class name.
+    """
+
+    def initialize(self, args):
+        """`initialize` is called only once when the model is being loaded.
+        Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+        Parameters
+        ----------
+        args : dict
+          Both keys and values are strings. The dictionary keys and values are:
+          * model_config: A JSON string containing the model configuration
+          * model_instance_kind: A string containing model instance kind
+          * model_instance_device_id: A string containing model instance device
+            ID
+          * model_repository: Model repository path
+          * model_version: Model version
+          * model_name: Model name
+        """
+        # Parse model configs
+        self.model_config = model_config = json.loads(args['model_config'])
+
+        # Parse model output configs
+        output_config = pb_utils.get_output_config_by_name(
+            model_config, 'OUTPUT')
+
+        # Convert Triton types to numpy types
+        self.output_dtype = pb_utils.triton_string_to_numpy(
+            output_config['data_type'])
+
+        cur_folder = Path(__file__).parent
+
+        self.tokenizer = Tokenizer(
+            osp.join(
+                cur_folder, self.model_config['parameters']['tokenizer_path']
+                ['string_value']))
+
+    def execute(self, requests):
+        """`execute` must be implemented in every Python model. `execute`
+        function receives a list of pb_utils.InferenceRequest as the only
+        argument. This function is called when an inference is requested
+        for this model. Depending on the batching configuration (e.g. Dynamic
+        Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+        pb_utils.InferenceRequest in `requests`. If there is an error, you can
+        set the error argument when creating a pb_utils.InferenceResponse.
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for idx, request in enumerate(requests):
+            # Get input tensors
+            tokens_batch = pb_utils.get_input_tensor_by_name(
+                request, 'TOKENS_BATCH').as_numpy()
+            sequence_length = pb_utils.get_input_tensor_by_name(
+                request, 'sequence_length').as_numpy()
+
+            # Postprocessing output data.
+            outputs = self._postprocessing(tokens_batch.tolist(),
+                                           sequence_length)
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            output_tensor = pb_utils.Tensor(
+                'OUTPUT',
+                np.array(outputs).astype(self.output_dtype))
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #     output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(
+                output_tensors=[output_tensor])
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+
+        Implementing `finalize` function is optional. This function allows the
+        model to perform any necessary clean ups before exit.
+        """
+        print('Cleaning up...')
+
+    def _postprocessing(self, tokens_batch, sequence_length):
+        """decode token ids into texts."""
+        outputs = []
+        for beam_tokens, beam_len in zip(tokens_batch, sequence_length):
+            for tokens, _len in zip(beam_tokens, beam_len):
+                output = self.tokenizer.decode(tokens, _len)
+                output = output.encode('utf8')
+                outputs.append(output)
+        return outputs
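As a sanity check of the interface above, here is a minimal client-side sketch of querying this model once the repository is served by Triton. Only the tensor names `TOKENS_BATCH`, `sequence_length`, and `OUTPUT` come from the code; the dtypes and the server URL are assumptions, since `config.pbtxt` is not expanded in this view:

```python
# Hypothetical client sketch; tensor dtypes and server URL are assumptions.
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url='localhost:8000')

# One request with one beam of three token ids (placeholder values).
tokens = np.array([[[100, 200, 300]]], dtype=np.uint32)
seq_len = np.array([[3]], dtype=np.uint32)

inputs = [
    httpclient.InferInput('TOKENS_BATCH', list(tokens.shape), 'UINT32'),
    httpclient.InferInput('sequence_length', list(seq_len.shape), 'UINT32'),
]
inputs[0].set_data_from_numpy(tokens)
inputs[1].set_data_from_numpy(seq_len)

result = client.infer('postprocessing', inputs,
                      outputs=[httpclient.InferRequestedOutput('OUTPUT')])
print(result.as_numpy('OUTPUT'))  # decoded text, utf-8 encoded bytes
```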
model_repository/postprocessing/1/tokenizer/config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "/root/psy/internlm2-7b/work_dirs/internlm2_chat_7b_qlora_oasst1_512_e3_copy/hf_2/merge",
+  "architectures": [
+    "InternLM2ForCausalLM"
+  ],
+  "attn_implementation": "eager",
+  "auto_map": {
+    "AutoConfig": "configuration_internlm.InternLMConfig",
+    "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+    "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+  },
+  "bias": false,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "fp16": true,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "internlm",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 2,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "dynamic"
+  },
+  "rope_theta": 1000000,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.37.2",
+  "use_cache": false,
+  "vocab_size": 92544
+}
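Because `auto_map` points `AutoConfig`/`AutoModelForCausalLM` at the bundled `configuration_internlm.py` and `modeling_internlm2.py`, loading this folder through `transformers` requires `trust_remote_code=True`. A minimal sketch, assuming the repository layout above:

```python
# Sketch of loading the bundled config; the path assumes the repo layout above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(
    'model_repository/postprocessing/1/tokenizer',
    trust_remote_code=True,  # required by the auto_map entries above
)
print(cfg.model_type, cfg.num_key_value_heads)  # internlm 8
```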
model_repository/postprocessing/1/tokenizer/configuration_internlm.py ADDED
@@ -0,0 +1,164 @@
+# coding=utf-8
+# Copyright (c) InternLM. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" InternLM model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class InternLMConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+    an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 103168):
+            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`InternLMModel`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 11008):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings
+    Example:
+
+    ```python
+    >>> from transformers import InternLMModel, InternLMConfig
+
+    >>> # Initializing an InternLM internlm-7b style configuration
+    >>> configuration = InternLMConfig()
+
+    >>> # Initializing a model from the internlm-7b style configuration
+    >>> model = InternLMModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "internlm"
+    _auto_class = "AutoConfig"
+
+    def __init__(  # pylint: disable=W0102
+        self,
+        vocab_size=103168,
+        hidden_size=4096,
+        intermediate_size=11008,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=None,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        bias=True,
+        rope_theta=10000,
+        rope_scaling=None,
+        attn_implementation="eager",
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.bias = bias
+
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self._rope_scaling_validation()
+
+        self.attn_implementation = attn_implementation
+        if self.attn_implementation is None:
+            self.attn_implementation = "eager"
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
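`_rope_scaling_validation` enforces exactly two keys: a `type` of `'linear'` or `'dynamic'` and a float `factor >= 1`. A small sketch of the contract, assuming this module is importable as `configuration_internlm`:

```python
# Sketch of the rope_scaling contract; assumes configuration_internlm is importable.
from configuration_internlm import InternLMConfig

ok = InternLMConfig(rope_scaling={'type': 'dynamic', 'factor': 2.0})  # accepted

try:
    InternLMConfig(rope_scaling={'type': 'yarn', 'factor': 2.0})  # unsupported type
except ValueError as err:
    print(err)

try:
    InternLMConfig(rope_scaling={'factor': 2})  # missing `type`, int factor
except ValueError as err:
    print(err)
```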
model_repository/postprocessing/1/tokenizer/generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 2,
+  "transformers_version": "4.37.2"
+}
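A short sketch of how these defaults are picked up at generation time, assuming the folder layout above:

```python
# Sketch; the path assumes the repo layout above.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained('model_repository/postprocessing/1/tokenizer')
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 1 2 2
```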
model_repository/postprocessing/1/tokenizer/modeling_internlm2.py ADDED
@@ -0,0 +1,1385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (
31
+ BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ )
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+
43
+ try:
44
+ from transformers.generation.streamers import BaseStreamer
45
+ except: # noqa # pylint: disable=bare-except
46
+ BaseStreamer = None
47
+
48
+ from .configuration_internlm import InternLMConfig as InternLM2Config
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CONFIG_FOR_DOC = "InternLM2Config"
53
+
54
+ flash_attn_func, flash_attn_varlen_func = None, None
55
+ pad_input, index_first_axis, unpad_input = None, None, None
56
+ def _import_flash_attn():
57
+ global flash_attn_func, flash_attn_varlen_func
58
+ global pad_input, index_first_axis, unpad_input
59
+ try:
60
+ from flash_attn import flash_attn_func as _flash_attn_func, flash_attn_varlen_func as _flash_attn_varlen_func
61
+ from flash_attn.bert_padding import pad_input as _pad_input, index_first_axis as _index_first_axis, unpad_input as _unpad_input
62
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
63
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
64
+ except ImportError:
65
+ raise ImportError("flash_attn is not installed.")
66
+
67
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
68
+ def _get_unpad_data(attention_mask):
69
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
73
+ return (
74
+ indices,
75
+ cu_seqlens,
76
+ max_seqlen_in_batch,
77
+ )
78
+
79
+
80
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
81
+ def _make_causal_mask(
82
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
83
+ ):
84
+ """
85
+ Make causal mask used for bi-directional self-attention.
86
+ """
87
+ bsz, tgt_len = input_ids_shape
88
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
89
+ mask_cond = torch.arange(mask.size(-1), device=device)
90
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
91
+ mask = mask.to(dtype)
92
+
93
+ if past_key_values_length > 0:
94
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
95
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
96
+
97
+
98
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
99
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
100
+ """
101
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
102
+ """
103
+ bsz, src_len = mask.size()
104
+ tgt_len = tgt_len if tgt_len is not None else src_len
105
+
106
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
107
+
108
+ inverted_mask = 1.0 - expanded_mask
109
+
110
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
111
+
112
+
113
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
114
+ class InternLM2RMSNorm(nn.Module):
115
+ def __init__(self, hidden_size, eps=1e-6):
116
+ """
117
+ InternLM2RMSNorm is equivalent to T5LayerNorm
118
+ """
119
+ super().__init__()
120
+ self.weight = nn.Parameter(torch.ones(hidden_size))
121
+ self.variance_epsilon = eps
122
+
123
+ def forward(self, hidden_states):
124
+ input_dtype = hidden_states.dtype
125
+ hidden_states = hidden_states.to(torch.float32)
126
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
127
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
128
+ return self.weight * hidden_states.to(input_dtype)
129
+
130
+
131
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
132
+ class InternLM2RotaryEmbedding(nn.Module):
133
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
134
+ super().__init__()
135
+
136
+ self.dim = dim
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.base = base
139
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
140
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
141
+
142
+ # Build here to make `torch.jit.trace` work.
143
+ self._set_cos_sin_cache(
144
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
145
+ )
146
+
147
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
148
+ self.max_seq_len_cached = seq_len
149
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
150
+
151
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
153
+ emb = torch.cat((freqs, freqs), dim=-1)
154
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
155
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
156
+
157
+ def forward(self, x, seq_len=None):
158
+ # x: [bs, num_attention_heads, seq_len, head_size]
159
+ if seq_len > self.max_seq_len_cached:
160
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
161
+
162
+ return (
163
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
164
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
165
+ )
166
+
167
+
168
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
169
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
170
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
171
+
172
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173
+ self.scaling_factor = scaling_factor
174
+ super().__init__(dim, max_position_embeddings, base, device)
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
179
+ t = t / self.scaling_factor
180
+
181
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
182
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
183
+ emb = torch.cat((freqs, freqs), dim=-1)
184
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
186
+
187
+
188
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
189
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
190
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
191
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
192
+ """
193
+
194
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
195
+ self.scaling_factor = scaling_factor
196
+ super().__init__(dim, max_position_embeddings, base, device)
197
+
198
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
199
+ self.max_seq_len_cached = seq_len
200
+
201
+ if seq_len > self.max_position_embeddings:
202
+ base = self.base * (
203
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
204
+ ) ** (self.dim / (self.dim - 2))
205
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
206
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
207
+
208
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
209
+
210
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
211
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
215
+
216
+
217
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
218
+ def rotate_half(x):
219
+ """Rotates half the hidden dims of the input."""
220
+ x1 = x[..., : x.shape[-1] // 2]
221
+ x2 = x[..., x.shape[-1] // 2 :]
222
+ return torch.cat((-x2, x1), dim=-1)
223
+
224
+
225
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
226
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
227
+ """Applies Rotary Position Embedding to the query and key tensors."""
228
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
229
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
230
+ q_embed = (q * cos) + (rotate_half(q) * sin)
231
+ k_embed = (k * cos) + (rotate_half(k) * sin)
232
+ return q_embed, k_embed
233
+
234
+
235
+ class InternLM2MLP(nn.Module):
236
+ def __init__(self, config):
237
+ super().__init__()
238
+ self.config = config
239
+ self.hidden_size = config.hidden_size
240
+ self.intermediate_size = config.intermediate_size
241
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
242
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
243
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
244
+ self.act_fn = ACT2FN[config.hidden_act]
245
+
246
+ def forward(self, x):
247
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
248
+
249
+ return down_proj
250
+
251
+
252
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
253
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
254
+ """
255
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
256
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
257
+ """
258
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
259
+ if n_rep == 1:
260
+ return hidden_states
261
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
262
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
263
+
264
+
265
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
266
+ class InternLM2Attention(nn.Module):
267
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
268
+
269
+ def __init__(self, config: InternLM2Config):
270
+ super().__init__()
271
+ self.config = config
272
+ self.hidden_size = config.hidden_size
273
+ self.num_heads = config.num_attention_heads
274
+ self.head_dim = self.hidden_size // self.num_heads
275
+ self.num_key_value_heads = config.num_key_value_heads
276
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277
+ self.max_position_embeddings = config.max_position_embeddings
278
+ self.is_causal = True
279
+
280
+ if (self.head_dim * self.num_heads) != self.hidden_size:
281
+ raise ValueError(
282
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
283
+ f" and `num_heads`: {self.num_heads})."
284
+ )
285
+
286
+ self.wqkv = nn.Linear(
287
+ self.hidden_size,
288
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
289
+ bias=config.bias,
290
+ )
291
+
292
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
293
+ self._init_rope()
294
+
295
+ def _init_rope(self):
296
+ if self.config.rope_scaling is None:
297
+ self.rotary_emb = InternLM2RotaryEmbedding(
298
+ self.head_dim,
299
+ max_position_embeddings=self.max_position_embeddings,
300
+ base=self.config.rope_theta,
301
+ )
302
+ else:
303
+ scaling_type = self.config.rope_scaling["type"]
304
+ scaling_factor = self.config.rope_scaling["factor"]
305
+ if scaling_type == "dynamic":
306
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
307
+ self.head_dim,
308
+ max_position_embeddings=self.max_position_embeddings,
309
+ base=self.config.rope_theta,
310
+ scaling_factor=scaling_factor,
311
+ )
312
+ elif scaling_type == "linear":
313
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
314
+ self.head_dim,
315
+ max_position_embeddings=self.max_position_embeddings,
316
+ base=self.config.rope_theta,
317
+ scaling_factor=scaling_factor,
318
+ )
319
+ else:
320
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
321
+ return self.rotary_emb
322
+
323
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
324
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
325
+
326
+ def forward(
327
+ self,
328
+ hidden_states: torch.Tensor,
329
+ attention_mask: Optional[torch.Tensor] = None,
330
+ position_ids: Optional[torch.LongTensor] = None,
331
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
332
+ output_attentions: bool = False,
333
+ use_cache: bool = False,
334
+ **kwargs,
335
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
336
+ if "padding_mask" in kwargs:
337
+ warnings.warn(
338
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
339
+ "Please make sure use `attention_mask` instead.`"
340
+ )
341
+
342
+ bsz, q_len, _ = hidden_states.size()
343
+
344
+ qkv_states = self.wqkv(hidden_states)
345
+
346
+ qkv_states = rearrange(
347
+ qkv_states,
348
+ "b q (h gs d) -> b q h gs d",
349
+ gs=2 + self.num_key_value_groups,
350
+ d=self.head_dim,
351
+ )
352
+
353
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
354
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
355
+ key_states = qkv_states[..., -2, :]
356
+ value_states = qkv_states[..., -1, :]
357
+
358
+ query_states = query_states.transpose(1, 2)
359
+ key_states = key_states.transpose(1, 2)
360
+ value_states = value_states.transpose(1, 2)
361
+
362
+ kv_seq_len = key_states.shape[-2]
363
+ if past_key_value is not None:
364
+ kv_seq_len += past_key_value[0].shape[-2]
365
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
366
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
367
+
368
+ if past_key_value is not None:
369
+ # reuse k, v, self_attention
370
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
371
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
372
+
373
+ past_key_value = (key_states, value_states) if use_cache else None
374
+
375
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
376
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
377
+
378
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
379
+
380
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
381
+ raise ValueError(
382
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
383
+ f" {attn_weights.size()}"
384
+ )
385
+
386
+ if attention_mask is not None:
387
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
388
+ raise ValueError(
389
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
390
+ )
391
+ attn_weights = attn_weights + attention_mask
392
+
393
+ # upcast attention to fp32
394
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
395
+ attn_output = torch.matmul(attn_weights, value_states)
396
+
397
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
398
+ raise ValueError(
399
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
400
+ f" {attn_output.size()}"
401
+ )
402
+
403
+ attn_output = attn_output.transpose(1, 2).contiguous()
404
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
405
+
406
+ attn_output = self.wo(attn_output)
407
+
408
+ if not output_attentions:
409
+ attn_weights = None
410
+
411
+ return attn_output, attn_weights, past_key_value
412
+
413
+
414
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
415
+ class InternLM2FlashAttention2(InternLM2Attention):
416
+ """
417
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
418
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
419
+ flash attention and deal with padding tokens in case the input contains any of them.
420
+ """
421
+
422
+ def forward(
423
+ self,
424
+ hidden_states: torch.Tensor,
425
+ attention_mask: Optional[torch.LongTensor] = None,
426
+ position_ids: Optional[torch.LongTensor] = None,
427
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
428
+ output_attentions: bool = False,
429
+ use_cache: bool = False,
430
+ **kwargs,
431
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432
+ # InternLM2FlashAttention2 attention does not support output_attentions
433
+ if "padding_mask" in kwargs:
434
+ warnings.warn(
435
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
436
+ "Please make sure use `attention_mask` instead.`"
437
+ )
438
+
439
+ # overwrite attention_mask with padding_mask
440
+ attention_mask = kwargs.pop("padding_mask")
441
+
442
+ output_attentions = False
443
+
444
+ bsz, q_len, _ = hidden_states.size()
445
+
446
+ qkv_states = self.wqkv(hidden_states)
447
+
448
+ qkv_states = rearrange(
449
+ qkv_states,
450
+ "b q (h gs d) -> b q h gs d",
451
+ gs=2 + self.num_key_value_groups,
452
+ d=self.head_dim,
453
+ )
454
+
455
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
456
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
457
+ key_states = qkv_states[..., -2, :]
458
+ value_states = qkv_states[..., -1, :]
459
+
460
+ query_states = query_states.transpose(1, 2)
461
+ key_states = key_states.transpose(1, 2)
462
+ value_states = value_states.transpose(1, 2)
463
+
464
+ kv_seq_len = key_states.shape[-2]
465
+ if past_key_value is not None:
466
+ kv_seq_len += past_key_value[0].shape[-2]
467
+
468
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
469
+
470
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
471
+
472
+ if past_key_value is not None:
473
+ # reuse k, v, self_attention
474
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
475
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
476
+
477
+ past_key_value = (key_states, value_states) if use_cache else None
478
+
479
+ query_states = query_states.transpose(1, 2)
480
+ key_states = key_states.transpose(1, 2)
481
+ value_states = value_states.transpose(1, 2)
482
+
483
+ attn_output = self._flash_attention_forward(
484
+ query_states, key_states, value_states, attention_mask, q_len
485
+ )
486
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
487
+ attn_output = self.wo(attn_output)
488
+
489
+ if not output_attentions:
490
+ attn_weights = None
491
+
492
+ return attn_output, attn_weights, past_key_value
493
+
494
+ def _flash_attention_forward(
495
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
496
+ ):
497
+ """
498
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
499
+ first unpad the input, then computes the attention scores and pad the final attention scores.
500
+
501
+ Args:
502
+ query_states (`torch.Tensor`):
503
+ Input query states to be passed to Flash Attention API
504
+ key_states (`torch.Tensor`):
505
+ Input key states to be passed to Flash Attention API
506
+ value_states (`torch.Tensor`):
507
+ Input value states to be passed to Flash Attention API
508
+ attention_mask (`torch.Tensor`):
509
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
510
+ position of padding tokens and 1 for the position of non-padding tokens.
511
+ dropout (`int`, *optional*):
512
+ Attention dropout
513
+ softmax_scale (`float`, *optional*):
514
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
515
+ """
516
+ # Contains at least one padding token in the sequence
517
+ causal = self.is_causal and query_length != 1
518
+ if attention_mask is not None:
519
+ batch_size = query_states.shape[0]
520
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
521
+ query_states, key_states, value_states, attention_mask, query_length
522
+ )
523
+
524
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
525
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
526
+
527
+ attn_output_unpad = flash_attn_varlen_func(
528
+ query_states,
529
+ key_states,
530
+ value_states,
531
+ cu_seqlens_q=cu_seqlens_q,
532
+ cu_seqlens_k=cu_seqlens_k,
533
+ max_seqlen_q=max_seqlen_in_batch_q,
534
+ max_seqlen_k=max_seqlen_in_batch_k,
535
+ dropout_p=dropout,
536
+ softmax_scale=softmax_scale,
537
+ causal=causal,
538
+ )
539
+
540
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
541
+ else:
542
+ attn_output = flash_attn_func(
543
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
544
+ )
545
+
546
+ return attn_output
547
+
548
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
549
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
550
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
551
+
552
+ key_layer = index_first_axis(
553
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
554
+ )
555
+ value_layer = index_first_axis(
556
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
557
+ )
558
+
559
+ if query_length == kv_seq_len:
560
+ query_layer = index_first_axis(
561
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
562
+ )
563
+ cu_seqlens_q = cu_seqlens_k
564
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
565
+ indices_q = indices_k
566
+ elif query_length == 1:
567
+ max_seqlen_in_batch_q = 1
568
+ cu_seqlens_q = torch.arange(
569
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
570
+ ) # There is a memcpy here, that is very bad.
571
+ indices_q = cu_seqlens_q[:-1]
572
+ query_layer = query_layer.squeeze(1)
573
+ else:
574
+ # The -q_len: slice assumes left padding.
575
+ attention_mask = attention_mask[:, -query_length:]
576
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
577
+
578
+ return (
579
+ query_layer,
580
+ key_layer,
581
+ value_layer,
582
+ indices_q.to(torch.int64),
583
+ (cu_seqlens_q, cu_seqlens_k),
584
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
585
+ )
586
+
587
+ INTERNLM2_ATTENTION_CLASSES = {
588
+ "eager": InternLM2Attention,
589
+ "flash_attention_2": InternLM2FlashAttention2,
590
+ }
591
+
592
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
593
+ class InternLM2DecoderLayer(nn.Module):
594
+ def __init__(self, config: InternLM2Config):
595
+ super().__init__()
596
+ self.hidden_size = config.hidden_size
597
+
598
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
599
+
600
+ self.feed_forward = InternLM2MLP(config)
601
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
602
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
603
+
604
+ def forward(
605
+ self,
606
+ hidden_states: torch.Tensor,
607
+ attention_mask: Optional[torch.Tensor] = None,
608
+ position_ids: Optional[torch.LongTensor] = None,
609
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
610
+ output_attentions: Optional[bool] = False,
611
+ use_cache: Optional[bool] = False,
612
+ **kwargs,
613
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
614
+ """
615
+ Args:
616
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
617
+ attention_mask (`torch.FloatTensor`, *optional*):
618
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
619
+ query_sequence_length, key_sequence_length)` if default attention is used.
620
+ output_attentions (`bool`, *optional*):
621
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
622
+ returned tensors for more detail.
623
+ use_cache (`bool`, *optional*):
624
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
625
+ (see `past_key_values`).
626
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
627
+ """
628
+ if "padding_mask" in kwargs:
629
+ warnings.warn(
630
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
631
+ "Please make sure use `attention_mask` instead.`"
632
+ )
633
+
634
+ residual = hidden_states
635
+
636
+ hidden_states = self.attention_norm(hidden_states)
637
+
638
+ # Self Attention
639
+ hidden_states, self_attn_weights, present_key_value = self.attention(
640
+ hidden_states=hidden_states,
641
+ attention_mask=attention_mask,
642
+ position_ids=position_ids,
643
+ past_key_value=past_key_value,
644
+ output_attentions=output_attentions,
645
+ use_cache=use_cache,
646
+ **kwargs,
647
+ )
648
+ hidden_states = residual + hidden_states
649
+
650
+ # Fully Connected
651
+ residual = hidden_states
652
+ hidden_states = self.ffn_norm(hidden_states)
653
+ hidden_states = self.feed_forward(hidden_states)
654
+ hidden_states = residual + hidden_states
655
+
656
+ outputs = (hidden_states,)
657
+
658
+ if output_attentions:
659
+ outputs += (self_attn_weights,)
660
+
661
+ if use_cache:
662
+ outputs += (present_key_value,)
663
+
664
+ return outputs
665
+
666
+
667
+ InternLM2_START_DOCSTRING = r"""
668
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
669
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
670
+ etc.)
671
+
672
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
673
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
674
+ and behavior.
675
+
676
+ Parameters:
677
+ config ([`InternLM2Config`]):
678
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
679
+ load the weights associated with the model, only the configuration. Check out the
680
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
681
+ """
682
+
683
+
684
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
685
+ @add_start_docstrings(
686
+ "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
687
+ InternLM2_START_DOCSTRING,
688
+ )
689
+ class InternLM2PreTrainedModel(PreTrainedModel):
690
+ config_class = InternLM2Config
691
+ base_model_prefix = "model"
692
+ supports_gradient_checkpointing = True
693
+ _no_split_modules = ["InternLM2DecoderLayer"]
694
+ _skip_keys_device_placement = "past_key_values"
695
+
696
+ def _init_weights(self, module):
697
+ std = self.config.initializer_range
698
+ if isinstance(module, nn.Linear):
699
+ module.weight.data.normal_(mean=0.0, std=std)
700
+ if module.bias is not None:
701
+ module.bias.data.zero_()
702
+ elif isinstance(module, nn.Embedding):
703
+ module.weight.data.normal_(mean=0.0, std=std)
704
+ if module.padding_idx is not None:
705
+ module.weight.data[module.padding_idx].zero_()
706
+
707
+
708
+ InternLM2_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
711
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
712
+ it.
713
+
714
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
715
+ [`PreTrainedTokenizer.__call__`] for details.
716
+
717
+ [What are input IDs?](../glossary#input-ids)
718
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
719
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
720
+
721
+ - 1 for tokens that are **not masked**,
722
+ - 0 for tokens that are **masked**.
723
+
724
+ [What are attention masks?](../glossary#attention-mask)
725
+
726
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
727
+ [`PreTrainedTokenizer.__call__`] for details.
728
+
729
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
730
+ `past_key_values`).
731
+
732
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
733
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
734
+ information on the default strategy.
735
+
736
+ - 1 indicates the head is **not masked**,
737
+ - 0 indicates the head is **masked**.
738
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
739
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
740
+ config.n_positions - 1]`.
741
+
742
+ [What are position IDs?](../glossary#position-ids)
743
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
744
+ when `config.use_cache=True`):
745
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
746
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
747
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
748
+
749
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
750
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
751
+
752
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
753
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
754
+ of shape `(batch_size, sequence_length)`.
755
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
757
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
758
+ model's internal embedding lookup matrix.
759
+ use_cache (`bool`, *optional*):
760
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
761
+ `past_key_values`).
762
+ output_attentions (`bool`, *optional*):
763
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
764
+ tensors for more detail.
765
+ output_hidden_states (`bool`, *optional*):
766
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
767
+ more detail.
768
+ return_dict (`bool`, *optional*):
769
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
770
+ """
+
+
+ # Modified from transformers.models.llama.modeling_llama.LlamaModel
+ @add_start_docstrings(
+     "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
+     InternLM2_START_DOCSTRING,
+ )
+ class InternLM2Model(InternLM2PreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`InternLM2DecoderLayer`].
+
+     Args:
+         config: InternLM2Config
+     """
+
+     _auto_class = "AutoModel"
+
+     def __init__(self, config: InternLM2Config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+         self.config = config
+
+         self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+
+         self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
+         self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         self.gradient_checkpointing = False
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.tok_embeddings
+
+     def set_input_embeddings(self, value):
+         self.tok_embeddings = value
+
+     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+         # create causal mask
+         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+         combined_attention_mask = None
+         if input_shape[-1] > 1:
+             combined_attention_mask = _make_causal_mask(
+                 input_shape,
+                 inputs_embeds.dtype,
+                 device=inputs_embeds.device,
+                 past_key_values_length=past_key_values_length,
+             )
+
+         if attention_mask is not None:
+             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                 inputs_embeds.device
+             )
+             combined_attention_mask = (
+                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+             )
+
+         return combined_attention_mask
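
For reference, a minimal standalone sketch of the additive causal mask that `_make_causal_mask` (defined earlier in this file) produces, assuming float32 and no cache; masked positions carry the dtype minimum so they contribute effectively nothing after softmax:

```python
import torch

# Sketch only, not the library helper itself: an additive causal mask for
# seq_len=4. Strictly-upper entries get the dtype minimum; softmax then
# assigns future positions ~zero attention.
seq_len = 4
mask = torch.full((seq_len, seq_len), torch.finfo(torch.float32).min)
mask = torch.triu(mask, diagonal=1)
print(mask)  # row i attends to columns 0..i only
```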
+
+     @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if self.config.attn_implementation == "flash_attention_2":
+             _import_flash_attn()
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is not None:
+             batch_size, seq_length = input_ids.shape[:2]
+         elif inputs_embeds is not None:
+             batch_size, seq_length = inputs_embeds.shape[:2]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         seq_length_with_past = seq_length
+         past_key_values_length = 0
+         if past_key_values is not None:
+             past_key_values_length = past_key_values[0][0].shape[2]
+             seq_length_with_past = seq_length_with_past + past_key_values_length
+
+         if position_ids is None:
+             device = input_ids.device if input_ids is not None else inputs_embeds.device
+             position_ids = torch.arange(
+                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+             )
+             position_ids = position_ids.unsqueeze(0)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.tok_embeddings(input_ids)
+
+         if self.config.attn_implementation == "flash_attention_2":
+             # 2d mask is passed through the layers
+             attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+         else:
+             if attention_mask is None:
+                 attention_mask = torch.ones(
+                     (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+                 )
+             attention_mask = self._prepare_decoder_attention_mask(
+                 attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+             )
+
+         # embed positions
+         hidden_states = inputs_embeds
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+         next_decoder_cache = () if use_cache else None
+
+         for idx, decoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+             if self.gradient_checkpointing and self.training:
+
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         # None for past_key_value
+                         return module(*inputs, output_attentions, None)
+
+                     return custom_forward
+
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(decoder_layer),
+                     hidden_states,
+                     attention_mask,
+                     position_ids,
+                     None,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=attention_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_value,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if use_cache:
+                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         next_cache = next_decoder_cache if use_cache else None
+         if not return_dict:
+             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=next_cache,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
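
Since the class registers itself through `_auto_class = "AutoModel"`, the backbone can be loaded through the Auto API with `trust_remote_code=True`. A minimal usage sketch; the model id `internlm/internlm2-chat-7b` is an assumption for illustration, not taken from this repo:

```python
from transformers import AutoModel, AutoTokenizer

repo = "internlm/internlm2-chat-7b"  # assumed model id, illustration only
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("hello", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, seq_len, config.hidden_size)
```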
+
+
+ # Modified from transformers.models.llama.modeling_llama.LlamaForCausalLM
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
+     _auto_class = "AutoModelForCausalLM"
+
+     _tied_weights_keys = ["output.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = InternLM2Model(config)
+         self.vocab_size = config.vocab_size
+         self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.tok_embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.tok_embeddings = value
+
+     def get_output_embeddings(self):
+         return self.output
+
+     def set_output_embeddings(self, new_embeddings):
+         self.output = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
+
+         >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = outputs[0]
+         logits = self.output(hidden_states)
+         logits = logits.float()
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             shift_logits = shift_logits.view(-1, self.config.vocab_size)
+             shift_labels = shift_labels.view(-1)
+             # Enable model parallelism
+             shift_labels = shift_labels.to(shift_logits.device)
+             loss = loss_fct(shift_logits, shift_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
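
A toy check of the shift-by-one loss wiring above: the logit at position t is scored against the label at position t+1, so the last logit and the first label are dropped:

```python
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 5, 10)          # (batch, seq, vocab), toy sizes
labels = torch.randint(0, 10, (1, 5))
shift_logits = logits[..., :-1, :].reshape(-1, 10)  # drop last position
shift_labels = labels[..., 1:].reshape(-1)          # drop first label
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())
```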
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+     ):
+         if past_key_values is not None:
+             past_length = past_key_values[0][0].shape[2]
+
+             # Some generation methods already pass only the last input ID
+             if input_ids.shape[1] > past_length:
+                 remove_prefix_length = past_length
+             else:
+                 # Default to old behavior: keep only final ID
+                 remove_prefix_length = input_ids.shape[1] - 1
+
+             input_ids = input_ids[:, remove_prefix_length:]
+
+         position_ids = kwargs.get("position_ids", None)
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -input_ids.shape[1] :]
+
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update(
+             {
+                 "position_ids": position_ids,
+                 "past_key_values": past_key_values,
+                 "use_cache": kwargs.get("use_cache"),
+                 "attention_mask": attention_mask,
+             }
+         )
+         return model_inputs
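
A toy trace of the cache-aware trimming above: once the cache already covers the first four positions, only the genuinely new token is fed back in:

```python
import torch

input_ids = torch.tensor([[1, 2, 3, 4, 5]])
past_length = 4  # stands in for past_key_values[0][0].shape[2]
print(input_ids[:, past_length:])  # tensor([[5]]) -> only the new token
```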
+
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (
+                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+             )
+         return reordered_past
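
A toy check (with assumed shapes) of the reordering above: dimension 0 of every cached key/value tensor, the beam dimension, is gathered with `beam_idx`:

```python
import torch

k = v = torch.arange(3.0).view(3, 1, 1, 1)  # (num_beams, heads, seq, head_dim)
past_key_values = ((k, v),)                 # a single layer's cache
beam_idx = torch.tensor([2, 0, 1])
reordered = tuple(
    tuple(t.index_select(0, beam_idx) for t in layer) for layer in past_key_values
)
print(reordered[0][0].flatten())  # tensor([2., 0., 1.])
```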
+
+     def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
+         prompt = ""
+         if meta_instruction:
+             prompt += f"""<s><|im_start|>system\n{meta_instruction}<|im_end|>\n"""
+         else:
+             prompt += "<s>"
+         for record in history:
+             prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
+         prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
+         return tokenizer([prompt], return_tensors="pt")
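
Concretely, tracing the f-strings above for `query="Hi"`, an empty history, and `meta_instruction="You are helpful."` yields this ChatML-style prompt:

```python
# Derived by hand from build_inputs; not an extra API.
prompt = (
    "<s><|im_start|>system\nYou are helpful.<|im_end|>\n"
    "<|im_start|>user\nHi<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```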
+
+     @torch.no_grad()
+     def chat(
+         self,
+         tokenizer,
+         query: str,
+         history: List[Tuple[str, str]] = [],
+         streamer: Optional[BaseStreamer] = None,
+         max_new_tokens: int = 1024,
+         do_sample: bool = True,
+         temperature: float = 0.8,
+         top_p: float = 0.8,
+         meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
+         "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
+         "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.",
+         **kwargs,
+     ):
+         inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
+         inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
+         # also add the end-of-assistant token to the eos token ids to avoid unnecessary generation
+         eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["<|im_end|>"])[0]]
+         outputs = self.generate(
+             **inputs,
+             streamer=streamer,
+             max_new_tokens=max_new_tokens,
+             do_sample=do_sample,
+             temperature=temperature,
+             top_p=top_p,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
+         outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
+         response = tokenizer.decode(outputs, skip_special_tokens=True)
+         response = response.split("<|im_end|>")[0]
+         history = history + [(query, response)]
+         return response, history
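
A usage sketch for `chat`, assuming a model and tokenizer loaded with `trust_remote_code=True` as in the earlier example (the model id is again an assumption):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "internlm/internlm2-chat-7b"  # assumed model id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

response, history = model.chat(tokenizer, "Hello!", history=[])
# Feed history back in to continue the same conversation:
response, history = model.chat(tokenizer, "Shorter, please.", history=history)
print(response)
```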
+
+     @torch.no_grad()
+     def stream_chat(
+         self,
+         tokenizer,
+         query: str,
+         history: List[Tuple[str, str]] = [],
+         max_new_tokens: int = 1024,
+         do_sample: bool = True,
+         temperature: float = 0.8,
+         top_p: float = 0.8,
+         **kwargs,
+     ):
+         """
+         Return a generator that yields `(response, history)` tuples, e.g.
+         ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
+         ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
+         """
+         if BaseStreamer is None:
+             raise ModuleNotFoundError(
+                 "The version of `transformers` is too low. Please make sure "
+                 "that you have installed `transformers>=4.28.0`."
+             )
+
+         response_queue = queue.Queue(maxsize=20)
+
+         class ChatStreamer(BaseStreamer):
+             def __init__(self, tokenizer) -> None:
+                 super().__init__()
+                 self.tokenizer = tokenizer
+                 self.queue = response_queue
+                 self.query = query
+                 self.history = history
+                 self.response = ""
+                 self.received_inputs = False
+                 self.queue.put((self.response, history + [(self.query, self.response)]))
+
+             def put(self, value):
+                 if len(value.shape) > 1 and value.shape[0] > 1:
+                     raise ValueError("ChatStreamer only supports batch size 1")
+                 elif len(value.shape) > 1:
+                     value = value[0]
+
+                 if not self.received_inputs:
+                     # The first received value is input_ids, ignore here
+                     self.received_inputs = True
+                     return
+
+                 token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
+                 if token.strip() != "<|im_end|>":
+                     self.response = self.response + token
+                     history = self.history + [(self.query, self.response)]
+                     self.queue.put((self.response, history))
+
+             def end(self):
+                 self.queue.put(None)
+
+         def stream_producer():
+             return self.chat(
+                 tokenizer=tokenizer,
+                 query=query,
+                 streamer=ChatStreamer(tokenizer=tokenizer),
+                 history=history,
+                 max_new_tokens=max_new_tokens,
+                 do_sample=do_sample,
+                 temperature=temperature,
+                 top_p=top_p,
+                 **kwargs,
+             )
+
+         def consumer():
+             producer = threading.Thread(target=stream_producer)
+             producer.start()
+             while True:
+                 res = response_queue.get()
+                 if res is None:
+                     return
+                 yield res
+
+         return consumer()
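
The returned generator yields the cumulative response after each new token, so a caller can render incremental output; a sketch, reusing the assumed `model` and `tokenizer` from the previous examples:

```python
for response, history in model.stream_chat(tokenizer, "Tell me about RMSNorm."):
    print(response, end="\r")  # each item is the full response so far
print()
```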
+
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
+ @add_start_docstrings(
+     """
+     The InternLM2 Model transformer with a sequence classification head on top (linear layer).
+
+     [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
+     as other causal models (e.g. GPT-2) do.
+
+     Since it does classification on the last token, it needs to know the position of the last token. If a
+     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+     each row of the batch).
+     """,
+     InternLM2_START_DOCSTRING,
+ )
+ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = InternLM2Model(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.tok_embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.tok_embeddings = value
+
+     @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             sequence_lengths = -1
+         else:
+             if input_ids is not None:
+                 sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
+                     logits.device
+                 )
+             else:
+                 sequence_lengths = -1
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(pooled_logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(pooled_logits, labels)
+         if not return_dict:
+             output = (pooled_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
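
A toy check of the last-non-padding pooling above, with `pad_token_id` assumed to be 0 for the illustration:

```python
import torch

input_ids = torch.tensor([[5, 6, 7, 0, 0]])  # two trailing pad tokens
pad_token_id = 0
# First pad position minus one; with no padding present, argmax is 0 and the
# -1 wraps around to the final position, which is the intended fallback.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
print(sequence_lengths)  # tensor([2]) -> logits at position 2 are pooled
```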
model_repository/postprocessing/1/tokenizer/placeholder ADDED
File without changes
model_repository/postprocessing/1/tokenizer/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,554 @@
+ {
+   "metadata": {
+     "total_size": 5251801088
+   },
+   "weight_map": {
+     "model.layers.0.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.0.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.1.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.10.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.10.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.11.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.12.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.13.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.14.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.15.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.16.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.17.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.18.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.19.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.2.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.2.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.20.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.20.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.21.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.22.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.23.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.24.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.25.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.26.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+     "model.layers.27.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.27.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.28.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.29.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.3.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.3.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.30.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.30.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+     "model.layers.31.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+     "model.layers.4.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.4.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+     "model.layers.4.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+     "model.layers.4.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+     "model.layers.4.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
453
+ "model.layers.4.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
454
+ "model.layers.4.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
455
+ "model.layers.4.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
456
+ "model.layers.4.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
457
+ "model.layers.4.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
458
+ "model.layers.4.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
459
+ "model.layers.4.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
460
+ "model.layers.4.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
461
+ "model.layers.4.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
462
+ "model.layers.4.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
463
+ "model.layers.4.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
464
+ "model.layers.4.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
465
+ "model.layers.5.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
466
+ "model.layers.5.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
467
+ "model.layers.5.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
468
+ "model.layers.5.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
469
+ "model.layers.5.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
470
+ "model.layers.5.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
471
+ "model.layers.5.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
472
+ "model.layers.5.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
473
+ "model.layers.5.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
474
+ "model.layers.5.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
475
+ "model.layers.5.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
476
+ "model.layers.5.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
477
+ "model.layers.5.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
478
+ "model.layers.5.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
479
+ "model.layers.5.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
480
+ "model.layers.5.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
481
+ "model.layers.5.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
482
+ "model.layers.6.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
483
+ "model.layers.6.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
484
+ "model.layers.6.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
485
+ "model.layers.6.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
486
+ "model.layers.6.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
487
+ "model.layers.6.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
488
+ "model.layers.6.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
489
+ "model.layers.6.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
490
+ "model.layers.6.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
491
+ "model.layers.6.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
492
+ "model.layers.6.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
493
+ "model.layers.6.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
494
+ "model.layers.6.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
495
+ "model.layers.6.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
496
+ "model.layers.6.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
497
+ "model.layers.6.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
498
+ "model.layers.6.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
499
+ "model.layers.7.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
500
+ "model.layers.7.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
501
+ "model.layers.7.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
502
+ "model.layers.7.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
503
+ "model.layers.7.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
504
+ "model.layers.7.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
505
+ "model.layers.7.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
506
+ "model.layers.7.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
507
+ "model.layers.7.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
508
+ "model.layers.7.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
509
+ "model.layers.7.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
510
+ "model.layers.7.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
511
+ "model.layers.7.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
512
+ "model.layers.7.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
513
+ "model.layers.7.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
514
+ "model.layers.7.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
515
+ "model.layers.7.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
516
+ "model.layers.8.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
517
+ "model.layers.8.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
518
+ "model.layers.8.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
519
+ "model.layers.8.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
520
+ "model.layers.8.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
521
+ "model.layers.8.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
522
+ "model.layers.8.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
523
+ "model.layers.8.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
524
+ "model.layers.8.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
525
+ "model.layers.8.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
526
+ "model.layers.8.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
527
+ "model.layers.8.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
528
+ "model.layers.8.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
529
+ "model.layers.8.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
530
+ "model.layers.8.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
531
+ "model.layers.8.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
532
+ "model.layers.8.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
533
+ "model.layers.9.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
534
+ "model.layers.9.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
535
+ "model.layers.9.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
536
+ "model.layers.9.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
537
+ "model.layers.9.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
538
+ "model.layers.9.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
539
+ "model.layers.9.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
540
+ "model.layers.9.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
541
+ "model.layers.9.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
542
+ "model.layers.9.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
543
+ "model.layers.9.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
544
+ "model.layers.9.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
545
+ "model.layers.9.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
546
+ "model.layers.9.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
547
+ "model.layers.9.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
548
+ "model.layers.9.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
549
+ "model.layers.9.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
550
+ "model.norm.weight": "pytorch_model-00003-of-00003.bin",
551
+ "model.tok_embeddings.weight": "pytorch_model-00001-of-00003.bin",
552
+ "output.weight": "pytorch_model-00003-of-00003.bin"
553
+ }
554
+ }
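
For orientation, the `weight_map` above is the lookup table that sharded-checkpoint loaders consult to find which `.bin` shard stores each tensor. A minimal sketch of resolving one tensor by hand (the checkpoint directory path is a hypothetical placeholder):

```python
import json
import os

import torch

ckpt_dir = "/path/to/internlm2-chat-7b"  # hypothetical checkpoint directory

with open(os.path.join(ckpt_dir, "pytorch_model.bin.index.json")) as f:
    index = json.load(f)

# weight_map maps each tensor name to the shard file that stores it
name = "model.layers.28.ffn_norm.weight"
shard_file = index["weight_map"][name]

# load only the shard that holds the tensor we want
shard = torch.load(os.path.join(ckpt_dir, shard_file), map_location="cpu")
print(shard[name].shape)
```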
model_repository/postprocessing/1/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
model_repository/postprocessing/1/tokenizer/tokenization_internlm.py ADDED
@@ -0,0 +1,240 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tokenization classes for InternLM."""
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+ class InternLMTokenizer(PreTrainedTokenizer):
+     """
+     Construct an InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ["input_ids", "attention_mask"]
+     _auto_class = "AutoTokenizer"
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token="</s>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         decode_with_prefix_space=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.decode_with_prefix_space = decode_with_prefix_space
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         self._no_prefix_space_tokens = None
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     """ Initialization"""
+
+     @property
+     def no_prefix_space_tokens(self):
+         if self._no_prefix_space_tokens is None:
+             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
+         return self._no_prefix_space_tokens
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     @property
+     def bos_token_id(self) -> Optional[int]:
+         return self.sp_model.bos_id()
+
+     @property
+     def eos_token_id(self) -> Optional[int]:
+         return self.sp_model.eos_id()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def _maybe_add_prefix_space(self, tokens, decoded):
+         if tokens and tokens[0] not in self.no_prefix_space_tokens:
+             return " " + decoded
+         else:
+             return decoded
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) to a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for token in tokens:
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         out_string = self.clean_up_tokenization(out_string)
+         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+         return out_string[1:]
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         if self.add_bos_token:
+             bos_token_ids = [self.bos_token_id]
+         else:
+             bos_token_ids = []
+
+         output = bos_token_ids + token_ids_0
+
+         if token_ids_1 is not None:
+             output = output + token_ids_1
+
+         if self.add_eos_token:
+             output = output + [self.eos_token_id]
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does
+         not make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         eos = [self.eos_token_id]
+
+         if token_ids_1 is None:
+             return len(token_ids_0 + eos) * [0]
+         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
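
As a quick usage note for the tokenizer class above: because it registers itself via `_auto_class = "AutoTokenizer"`, the usual way to load it is through `AutoTokenizer` with `trust_remote_code=True`. A minimal sketch (the model directory is a hypothetical placeholder):

```python
from transformers import AutoTokenizer

# hypothetical directory holding tokenizer.model, tokenizer_config.json
# and tokenization_internlm.py
tokenizer = AutoTokenizer.from_pretrained("/path/to/internlm2-chat-7b",
                                          trust_remote_code=True)

ids = tokenizer.encode("Hello, InternLM!")  # bos prepended (add_bos_token=True)
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))
```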
model_repository/postprocessing/1/tokenizer/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+ size 1477754
model_repository/postprocessing/1/tokenizer/tokenizer.py ADDED
@@ -0,0 +1,400 @@
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import json
+ import os
+ import os.path as osp
+ from collections import deque
+ from typing import List, Optional, Sequence, Union
+
+ import torch
+
+ from lmdeploy.utils import get_logger
+
+ # this file will be copied to the triton server; make sure all
+ # imports start from the package root lmdeploy
+
+
+ class SentencePieceTokenizer:
+     """Tokenizer of sentencepiece.
+
+     Args:
+         model_file (str): the path of the tokenizer model
+     """
+
+     def __init__(self, model_file: str):
+         from sentencepiece import SentencePieceProcessor
+         self.model = SentencePieceProcessor(model_file=model_file)
+         self._prefix_space_tokens = None
+         # for stop words
+         self._maybe_decode_bytes: bool = None
+         # TODO maybe lack a constant.py
+         self._indexes_tokens_deque = deque(maxlen=10)
+         self.max_indexes_num = 5
+         self.logger = get_logger('lmdeploy')
+
+     @property
+     def vocab_size(self):
+         """vocabulary size."""
+         return self.model.vocab_size()
+
+     @property
+     def bos_token_id(self):
+         """beginning of the sentence token id."""
+         return self.model.bos_id()
+
+     @property
+     def eos_token_id(self):
+         """end of the sentence token id."""
+         return self.model.eos_id()
+
+     @property
+     def prefix_space_tokens(self):
+         """tokens that start with a prefix space."""
+         if self._prefix_space_tokens is None:
+             vocab = self.model.IdToPiece(list(range(self.vocab_size)))
+             self._prefix_space_tokens = {
+                 i
+                 for i, tok in enumerate(vocab) if tok.startswith('▁')
+             }
+         return self._prefix_space_tokens
+
+     def _maybe_add_prefix_space(self, tokens, decoded):
+         """maybe add prefix space for incremental decoding."""
+         if len(tokens) and not decoded.startswith(' ') and\
+                 tokens[0] in self.prefix_space_tokens:
+             return ' ' + decoded
+         else:
+             return decoded
+
+     def indexes_containing_token(self, token: str):
+         """Return all the possible indexes whose decoding output may contain
+         the input token."""
+         # traversing the vocab is time consuming and cannot be accelerated
+         # with multiple threads or processes (the tokenizer can't be pickled),
+         # so we maintain the latest 10 stop words and return directly if matched
+         for _token, _indexes in self._indexes_tokens_deque:
+             if token == _token:
+                 return _indexes
+         if token == ' ':  # ' ' is special
+             token = '▁'
+         vocab = self.model.IdToPiece(list(range(self.vocab_size)))
+         indexes = [i for i, voc in enumerate(vocab) if token in voc]
+         if len(indexes) > self.max_indexes_num:
+             indexes = self.encode(token, add_bos=False)[-1:]
+             self.logger.warning(
+                 f'There are too many (>{self.max_indexes_num}) possible '
+                 f'indexes that may decode {token}; we will use {indexes} only')
+         self._indexes_tokens_deque.append((token, indexes))
+         return indexes
+
+     def encode(self, s: str, add_bos: bool = True, **kwargs):
+         """Tokenize a prompt.
+
+         Args:
+             s (str): a prompt
+         Returns:
+             list[int]: token ids
+         """
+         return self.model.Encode(s, add_bos=add_bos, **kwargs)
+
+     def decode(self, t: Sequence[int], offset: Optional[int] = None):
+         """De-tokenize.
+
+         Args:
+             t (List[int]): a list of token ids
+             offset (int): for incremental decoding. Defaults to None, which
+                 means it is not applied.
+         Returns:
+             str: text of the decoded tokens
+         """
+         if isinstance(t, torch.Tensor):
+             t = t.tolist()
+         t = t[offset:]
+         out_string = self.model.Decode(t)
+         if offset:
+             out_string = self._maybe_add_prefix_space(t, out_string)
+         return out_string
+
+     def __call__(self, s: Union[str, Sequence[str]]):
+         """Tokenize prompts.
+
+         Args:
+             s (str): prompts
+         Returns:
+             list[int]: token ids
+         """
+         import addict
+         add_bos = False
+         add_eos = False
+
+         input_ids = self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
+         return addict.Addict(input_ids=input_ids)
+
+
+ class HuggingFaceTokenizer:
+     """Tokenizer of huggingface transformers.
+
+     Args:
+         model_dir (str): the directory of the tokenizer model
+     """
+
+     def __init__(self, model_dir: str):
+         from transformers import AutoTokenizer
+         model_file = osp.join(model_dir, 'tokenizer.model')
+         backend_tokenizer_file = osp.join(model_dir, 'tokenizer.json')
+         model_file_exists = osp.exists(model_file)
+         self.logger = get_logger('lmdeploy')
+         if not osp.exists(backend_tokenizer_file) and model_file_exists:
+             self.logger.warning(
+                 'Cannot find tokenizer.json. '
+                 'It may take a long time to initialize the tokenizer.')
+         self.model = AutoTokenizer.from_pretrained(model_dir,
+                                                    trust_remote_code=True)
+         self._prefix_space_tokens = None
+         # save tokenizer.json to reuse
+         if not osp.exists(backend_tokenizer_file) and model_file_exists:
+             if hasattr(self.model, 'backend_tokenizer'):
+                 if os.access(model_dir, os.W_OK):
+                     self.model.backend_tokenizer.save(backend_tokenizer_file)
+
+         if self.model.eos_token_id is None:
+             generation_config_file = osp.join(model_dir,
+                                               'generation_config.json')
+             if osp.exists(generation_config_file):
+                 with open(generation_config_file, 'r') as f:
+                     cfg = json.load(f)
+                     self.model.eos_token_id = cfg['eos_token_id']
+             elif hasattr(self.model, 'eod_id'):  # Qwen remote
+                 self.model.eos_token_id = self.model.eod_id
+
+         # for stop words
+         self._vocab_size_with_added: int = None
+         self._maybe_decode_bytes: bool = None
+         # TODO maybe lack a constant.py
+         self._indexes_tokens_deque = deque(maxlen=10)
+         self.max_indexes_num = 5
+         self.token2id = {}
+
+     @property
+     def vocab_size(self):
+         """vocabulary size."""
+         return self.model.vocab_size
+
+     @property
+     def vocab_size_with_added(self):
+         """vocabulary size with added vocab."""
+         if self._vocab_size_with_added is not None:
+             return self._vocab_size_with_added
+         self._vocab_size_with_added = len(self.model.get_vocab())
+         return self._vocab_size_with_added
+
+     @property
+     def bos_token_id(self):
+         """beginning of the sentence token id."""
+         return self.model.bos_token_id
+
+     @property
+     def eos_token_id(self):
+         """end of the sentence token id."""
+         return self.model.eos_token_id
+
+     @property
+     def prefix_space_tokens(self):
+         """tokens that start with a prefix space."""
+         if self._prefix_space_tokens is None:
+             vocab = self.model.convert_ids_to_tokens(
+                 list(range(self.vocab_size)))
+             self._prefix_space_tokens = {
+                 i
+                 for i, tok in enumerate(vocab)
+                 if tok.startswith('▁' if isinstance(tok, str) else b' ')
+             }
+         return self._prefix_space_tokens
+
+     def _maybe_add_prefix_space(self, tokens: List[int], decoded: str):
+         """maybe add prefix space for incremental decoding."""
+         if len(tokens) and not decoded.startswith(' ') and\
+                 tokens[0] in self.prefix_space_tokens:
+             return ' ' + decoded
+         else:
+             return decoded
+
+     @property
+     def maybe_decode_bytes(self):
+         """Check whether self.model.convert_ids_to_tokens returns a non-str value."""
+         if self._maybe_decode_bytes is None:
+             self._maybe_decode_bytes = False
+             vocab = self.model.convert_ids_to_tokens(
+                 list(range(self.vocab_size)))
+             for tok in vocab:
+                 if not isinstance(tok, str):
+                     self._maybe_decode_bytes = True
+                     break
+         return self._maybe_decode_bytes
+
+     def indexes_containing_token(self, token: str):
+         """Return all the possible indexes whose decoding output may contain
+         the input token."""
+         # traversing the vocab is time consuming and cannot be accelerated
+         # with multiple threads or processes (the tokenizer can't be pickled),
+         # so we maintain the latest 10 stop words and return directly if matched
+         for _token, _indexes in self._indexes_tokens_deque:
+             if token == _token:
+                 return _indexes
+
+         if self.token2id == {}:
+             # decode is slower than convert_ids_to_tokens
+             if self.maybe_decode_bytes:
+                 self.token2id = {
+                     self.model.decode(i): i
+                     for i in range(self.vocab_size)
+                 }
+             else:
+                 self.token2id = {
+                     self.model.convert_ids_to_tokens(i): i
+                     for i in range(self.vocab_size)
+                 }
+         if token == ' ':  # ' ' is special
+             token = '▁'
+         indexes = [i for _token, i in self.token2id.items() if token in _token]
+         if len(indexes) > self.max_indexes_num:
+             indexes = self.encode(token, add_bos=False)[-1:]
+             self.logger.warning(
+                 f'There are too many (>{self.max_indexes_num}) possible '
+                 f'indexes that may decode {token}; we will use {indexes} only')
+         # there might be token ids that exceed self.vocab_size
+         if len(indexes) == 0:
+             indexes = self.encode(token, False)
+             if len(indexes) != 1:
+                 self.logger.warning(
+                     f'The token {token} maps to indexes {indexes}, whose '
+                     'length is not 1. Currently, it cannot be used as a stop word')
+                 indexes = []
+         self._indexes_tokens_deque.append((token, indexes))
+         return indexes
+
+     def encode(self, s: str, add_bos: bool = True, **kwargs):
+         """Tokenize a prompt.
+
+         Args:
+             s (str): a prompt
+         Returns:
+             list[int]: token ids
+         """
+         encoded = self.model.encode(s, **kwargs)
+         if not add_bos:
+             # in the middle of a session
+             if len(encoded) and encoded[0] == self.bos_token_id:
+                 encoded = encoded[1:]
+         return encoded
+
+     def decode(self, t: Sequence[int], offset: Optional[int] = None):
+         """De-tokenize.
+
+         Args:
+             t (List[int]): a list of token ids
+             offset (int): for incremental decoding. Defaults to None, which
+                 means it is not applied.
+         Returns:
+             str: text of the decoded tokens
+         """
+         skip_special_tokens = True
+         t = t[offset:]
+         out_string = self.model.decode(t,
+                                        skip_special_tokens=skip_special_tokens)
+         if offset:
+             out_string = self._maybe_add_prefix_space(t, out_string)
+         return out_string
+
+     def __call__(self, s: Union[str, Sequence[str]]):
+         """Tokenize prompts.
+
+         Args:
+             s (str): prompts
+         Returns:
+             list[int]: token ids
+         """
+         add_special_tokens = False
+         return self.model(s, add_special_tokens=add_special_tokens)
+
+
+ class Tokenizer:
+     """Tokenize prompts or de-tokenize tokens into texts.
+
+     Args:
+         model_file (str): the path of the tokenizer model
+     """
+
+     def __init__(self, model_file: str):
+         if model_file.endswith('.model'):
+             model_folder = osp.split(model_file)[0]
+         else:
+             model_folder = model_file
+             model_file = osp.join(model_folder, 'tokenizer.model')
+         tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
+
+         model_file_exists = osp.exists(model_file)
+         config_exists = osp.exists(tokenizer_config_file)
+         use_hf_model = config_exists or not model_file_exists
+         self.logger = get_logger('lmdeploy')
+         if not use_hf_model:
+             self.model = SentencePieceTokenizer(model_file)
+         else:
+             self.model = HuggingFaceTokenizer(model_folder)
+
+     @property
+     def vocab_size(self):
+         """vocabulary size."""
+         return self.model.vocab_size
+
+     @property
+     def bos_token_id(self):
+         """beginning of the sentence token id."""
+         return self.model.bos_token_id
+
+     @property
+     def eos_token_id(self):
+         """end of the sentence token id."""
+         return self.model.eos_token_id
+
+     def encode(self, s: str, add_bos: bool = True, **kwargs):
+         """Tokenize a prompt.
+
+         Args:
+             s (str): a prompt
+         Returns:
+             list[int]: token ids
+         """
+         return self.model.encode(s, add_bos, **kwargs)
+
+     def decode(self, t: Sequence[int], offset: Optional[int] = None):
+         """De-tokenize.
+
+         Args:
+             t (List[int]): a list of token ids
+             offset (int): for incremental decoding. Defaults to None, which
+                 means it is not applied.
+         Returns:
+             str: text of the decoded tokens
+         """
+         return self.model.decode(t, offset)
+
+     def __call__(self, s: Union[str, Sequence[str]]):
+         """Tokenize prompts.
+
+         Args:
+             s (str): prompts
+         Returns:
+             list[int]: token ids
+         """
+         return self.model(s)
+
+     def indexes_containing_token(self, token):
+         """Return all the possible indexes whose decoding output may contain
+         the input token."""
+         encoded = self.encode(token, add_bos=False)
+         if len(encoded) > 1:
+             self.logger.warning(
+                 f'The token {token} maps to indexes {encoded}, whose length '
+                 'is greater than 1. Currently, it cannot be used as a stop word')
+             return []
+         return self.model.indexes_containing_token(token)
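
The `offset` argument above is what makes streaming output work: on each step the caller re-decodes only the new suffix of the token list, and `_maybe_add_prefix_space` restores the leading space that SentencePiece-style vocabularies encode as `▁`. A minimal sketch of that incremental pattern, assuming this file is importable as `tokenizer` and pointing it at the tokenizer directory shipped in this repository:

```python
from tokenizer import Tokenizer  # the Tokenizer class defined above

tok = Tokenizer('model_repository/preprocessing/1/tokenizer')

ids = tok.encode('incremental decoding demo', add_bos=False)
text, offset = '', 0
for i in range(1, len(ids) + 1):
    # decode only the tokens produced since the previous step
    text += tok.decode(ids[:i], offset=offset)
    offset = i
print(text)
```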
model_repository/postprocessing/1/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,90 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92538": {
+       "content": "<|plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92539": {
+       "content": "<|interpreter|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92540": {
+       "content": "<|action_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92541": {
+       "content": "<|action_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92542": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92543": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_internlm.InternLMTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "tokenizer_class": "InternLMTokenizer",
+   "unk_token": "<unk>"
+ }
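
The `chat_template` field above is a standard Jinja template, so the stock `apply_chat_template` API renders prompts in the InternLM2 chat format. A small sketch (the model path is a hypothetical placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("/path/to/internlm2-chat-7b",
                                          trust_remote_code=True)
messages = [{"role": "user", "content": "Hi there!"}]
prompt = tokenizer.apply_chat_template(messages,
                                       tokenize=False,
                                       add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# Hi there!<|im_end|>
# <|im_start|>assistant
```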
model_repository/postprocessing/config.pbtxt ADDED
@@ -0,0 +1,36 @@
+ name: "postprocessing"
+ backend: "python"
+ max_batch_size: 1
+ input [
+   {
+     name: "TOKENS_BATCH"
+     data_type: TYPE_UINT32
+     dims: [ -1, -1 ]
+   },
+   {
+     name: "sequence_length"
+     data_type: TYPE_UINT32
+     dims: [ -1 ]
+   }
+ ]
+ output [
+   {
+     name: "OUTPUT"
+     data_type: TYPE_STRING
+     dims: [ -1, -1 ]
+   }
+ ]
+
+ instance_group [
+   {
+     count: 16
+     kind: KIND_CPU
+   }
+ ]
+
+ parameters {
+   key: "tokenizer_path"
+   value: {
+     string_value: "tokenizer/tokenizer.model"
+   }
+ }
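
For completeness, a client-side sketch of calling this postprocessing model directly with the generic Triton HTTP client. The token ids are illustrative, and the shapes carry the extra leading dimension implied by `max_batch_size: 1`:

```python
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

tokens = np.array([[[333, 444, 555]]], dtype=np.uint32)   # [batch, beam, seq]
lengths = np.array([[3]], dtype=np.uint32)                # [batch, beam]

inputs = [
    httpclient.InferInput("TOKENS_BATCH", list(tokens.shape), "UINT32"),
    httpclient.InferInput("sequence_length", list(lengths.shape), "UINT32"),
]
inputs[0].set_data_from_numpy(tokens)
inputs[1].set_data_from_numpy(lengths)

result = client.infer("postprocessing", inputs)
print(result.as_numpy("OUTPUT"))  # decoded text, dtype=object
```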
model_repository/preprocessing/1/__pycache__/model.cpython-310.pyc ADDED
Binary file (4.92 kB).
model_repository/preprocessing/1/model.py ADDED
@@ -0,0 +1,151 @@
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import json
+ import os.path as osp
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ import triton_python_backend_utils as pb_utils
+ from torch.nn.utils.rnn import pad_sequence
+
+ # This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served
+ # by the triton inference server, it has to be converted first by running
+ # `python lmdeploy/serve/turbomind/deploy.py`. Then
+ # `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py`
+ from .tokenizer.tokenizer import Tokenizer
+
+
+ class TritonPythonModel:
+     """Your Python model must use the same class name.
+
+     Every Python model that is created must have "TritonPythonModel" as the
+     class name.
+     """
+
+     def initialize(self, args):
+         """`initialize` is called only once when the model is being loaded.
+         Implementing the `initialize` function is optional. This function allows
+         the model to initialize any state associated with this model.
+         Parameters
+         ----------
+         args : dict
+           Both keys and values are strings. The dictionary keys and values are:
+           * model_config: A JSON string containing the model configuration
+           * model_instance_kind: A string containing model instance kind
+           * model_instance_device_id: A string containing model instance device
+             ID
+           * model_repository: Model repository path
+           * model_version: Model version
+           * model_name: Model name
+         """
+         # Parse model configs
+         self.model_config = model_config = json.loads(args['model_config'])
+
+         # Parse model output configs and convert Triton types to numpy types
+         input_names = ['INPUT_ID', 'REQUEST_INPUT_LEN']
+         for input_name in input_names:
+             setattr(
+                 self,
+                 input_name.lower() + '_dtype',
+                 pb_utils.triton_string_to_numpy(
+                     pb_utils.get_output_config_by_name(
+                         model_config, input_name)['data_type']))
+
+         cur_folder = Path(__file__).parent
+         self.tokenizer = Tokenizer(
+             osp.join(
+                 cur_folder, self.model_config['parameters']['tokenizer_path']
+                 ['string_value']))
+         self.start_id = self.tokenizer.bos_token_id
+         self.end_id = self.tokenizer.eos_token_id
+
+     def execute(self, requests):
+         """`execute` must be implemented in every Python model. `execute`
+         receives a list of pb_utils.InferenceRequest as its only
+         argument. This function is called when an inference is requested
+         for this model. Depending on the batching configuration (e.g. Dynamic
+         Batching) used, `requests` may contain multiple requests. Every
+         Python model must create one pb_utils.InferenceResponse for every
+         pb_utils.InferenceRequest in `requests`. If there is an error, you can
+         set the error argument when creating a pb_utils.InferenceResponse.
+         Parameters
+         ----------
+         requests : list
+           A list of pb_utils.InferenceRequest
+         Returns
+         -------
+         list
+           A list of pb_utils.InferenceResponse. The length of this list must
+           be the same as `requests`
+         """
+
+         responses = []
+
+         # Every Python backend must iterate over every one of the requests
+         # and create a pb_utils.InferenceResponse for each of them.
+         for idx, request in enumerate(requests):
+             # Get input tensors
+             query = pb_utils.get_input_tensor_by_name(request,
+                                                       'QUERY').as_numpy()
+
+             # Preprocess the input data.
+             input_id, request_input_len = self._create_request(query)
+
+             # Create output tensors. You need pb_utils.Tensor
+             # objects to create pb_utils.InferenceResponse.
+             input_id_tensor = pb_utils.Tensor(
+                 'INPUT_ID',
+                 np.array(input_id).astype(self.input_id_dtype))
+             request_input_len_tensor = pb_utils.Tensor(
+                 'REQUEST_INPUT_LEN',
+                 np.array(request_input_len).astype(
+                     self.request_input_len_dtype))
+
+             # Create InferenceResponse. You can set an error here in case
+             # there was a problem with handling this inference request.
+             # Below is an example of how you can set errors in inference
+             # response:
+             #
+             # pb_utils.InferenceResponse(
+             #     output_tensors=..., TritonError("An error occurred"))
+             inference_response = pb_utils.InferenceResponse(
+                 output_tensors=[input_id_tensor, request_input_len_tensor])
+             responses.append(inference_response)
+
+         # You should return a list of pb_utils.InferenceResponse. Length
+         # of this list must match the length of `requests` list.
+         return responses
+
+     def finalize(self):
+         """`finalize` is called only once when the model is being unloaded.
+
+         Implementing the `finalize` function is optional. This function allows
+         the model to perform any necessary clean ups before exit.
+         """
+         print('Cleaning up...')
+
+     def _create_request(self, query):
+         """Tokenize prompts and return the token ids and their length.
+
+         Args:
+             query (List[str]): a list of prompts
+         Returns:
+             tuple: token ids and their length
+         """
+         start_ids = []
+         for s in query:
+             _s = s[0].decode()
+             if _s == '<BOS>':
+                 start_id = [self.start_id
+                             ] if self.start_id is not None else [-1]
+             elif _s == '<EOS>':
+                 start_id = [self.end_id] if self.end_id is not None else [-1]
+             else:
+                 start_id = self.tokenizer.encode(_s)
+             start_ids.append(torch.IntTensor(start_id))
+
+         start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
+         start_ids = pad_sequence(start_ids,
+                                  batch_first=True,
+                                  padding_value=self.end_id)
+         return start_ids, start_lengths
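
A side note on the padding step in `_create_request` above: `pad_sequence` right-pads the ragged batch with the eos id (2 for this model, per `generation_config.json`), while the true lengths travel separately in `REQUEST_INPUT_LEN`. In isolation:

```python
import torch
from torch.nn.utils.rnn import pad_sequence

# illustrative ragged batch of token ids; 2 plays the role of end_id
batch = [torch.IntTensor([1, 100, 200, 300]), torch.IntTensor([1, 400])]
lengths = torch.IntTensor([[len(t)] for t in batch])  # [[4], [2]]
padded = pad_sequence(batch, batch_first=True, padding_value=2)
print(padded)
# tensor([[  1, 100, 200, 300],
#         [  1, 400,   2,   2]], dtype=torch.int32)
```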
model_repository/preprocessing/1/tokenizer/config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "/root/psy/internlm2-7b/work_dirs/internlm2_chat_7b_qlora_oasst1_512_e3_copy/hf_2/merge",
+   "architectures": [
+     "InternLM2ForCausalLM"
+   ],
+   "attn_implementation": "eager",
+   "auto_map": {
+     "AutoConfig": "configuration_internlm.InternLMConfig",
+     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+   },
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "fp16": true,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "internlm",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 2.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.37.2",
+   "use_cache": false,
+   "vocab_size": 92544
+ }
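
The `rope_scaling` block above selects dynamic NTK scaling with a factor of 2.0. In transformers-style implementations of the `dynamic` type, the RoPE base is enlarged once a sequence grows past `max_position_embeddings`; a sketch of that rescaling, with the constants taken from this config (the formula is recalled from the upstream implementation, not from this repository):

```python
base = 1000000        # rope_theta
factor = 2.0          # rope_scaling.factor
max_pos = 32768       # max_position_embeddings
dim = 4096 // 32      # head dim = hidden_size / num_attention_heads

def scaled_base(seq_len: int) -> float:
    """Dynamic NTK: grow the RoPE base for sequences beyond max_pos."""
    if seq_len <= max_pos:
        return base
    scale = factor * seq_len / max_pos - (factor - 1)
    return base * scale ** (dim / (dim - 2))

print(scaled_base(32768), scaled_base(65536))
```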
model_repository/preprocessing/1/tokenizer/configuration_internlm.py ADDED
@@ -0,0 +1,164 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class InternLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`InternLMModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the word embeddings
+     Example:
+
+     ```python
+     >>> from transformers import InternLMModel, InternLMConfig
+
+     >>> # Initializing an InternLM internlm-7b style configuration
+     >>> configuration = InternLMConfig()
+
+     >>> # Initializing a model from the internlm-7b style configuration
+     >>> model = InternLMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "internlm"
+     _auto_class = "AutoConfig"
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation="eager",
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = "eager"
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
model_repository/preprocessing/1/tokenizer/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.37.2"
+ }
model_repository/preprocessing/1/tokenizer/modeling_internlm2.py ADDED
@@ -0,0 +1,1385 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (
31
+ BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ )
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+
43
+ try:
44
+ from transformers.generation.streamers import BaseStreamer
45
+ except: # noqa # pylint: disable=bare-except
46
+ BaseStreamer = None
47
+
48
+ from .configuration_internlm import InternLMConfig as InternLM2Config
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CONFIG_FOR_DOC = "InternLM2Config"
53
+
54
+ flash_attn_func, flash_attn_varlen_func = None, None
55
+ pad_input, index_first_axis, unpad_input = None, None, None
56
+ def _import_flash_attn():
57
+ global flash_attn_func, flash_attn_varlen_func
58
+ global pad_input, index_first_axis, unpad_input
59
+ try:
60
+ from flash_attn import flash_attn_func as _flash_attn_func, flash_attn_varlen_func as _flash_attn_varlen_func
61
+ from flash_attn.bert_padding import pad_input as _pad_input, index_first_axis as _index_first_axis, unpad_input as _unpad_input
62
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
63
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
64
+ except ImportError:
65
+ raise ImportError("flash_attn is not installed.")
66
+
67
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
68
+ def _get_unpad_data(attention_mask):
69
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
73
+ return (
74
+ indices,
75
+ cu_seqlens,
76
+ max_seqlen_in_batch,
77
+ )
78
+
79
+
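A small worked example of what `_get_unpad_data` computes for a left-padded batch; the lines below simply re-run its body on a toy mask:

```python
import torch
import torch.nn.functional as F

# Toy left-padded batch: 1 marks real tokens, 0 marks padding.
attention_mask = torch.tensor([[0, 1, 1, 1],
                               [0, 0, 1, 1]])
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([1, 2, 3, 6, 7])
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
print(cu_seqlens)  # tensor([0, 3, 5]) — cumulative offsets flash-attn's varlen kernel expects
```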
80
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
81
+ def _make_causal_mask(
82
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
83
+ ):
84
+ """
85
+ Make the causal mask used for decoder (causal) self-attention.
86
+ """
87
+ bsz, tgt_len = input_ids_shape
88
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
89
+ mask_cond = torch.arange(mask.size(-1), device=device)
90
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
91
+ mask = mask.to(dtype)
92
+
93
+ if past_key_values_length > 0:
94
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
95
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
96
+
97
+
98
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
99
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
100
+ """
101
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
102
+ """
103
+ bsz, src_len = mask.size()
104
+ tgt_len = tgt_len if tgt_len is not None else src_len
105
+
106
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
107
+
108
+ inverted_mask = 1.0 - expanded_mask
109
+
110
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
111
+
112
+
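Both helpers above produce additive masks: 0 where attention is allowed and the dtype minimum where it is blocked. A tiny sketch of the causal case:

```python
import torch

# Reproduce _make_causal_mask's core for tgt_len=3 (no past key values).
dtype, tgt_len = torch.float32, 3
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
mask_cond = torch.arange(tgt_len)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(tgt_len, 1), 0)
# Row i is 0 for columns <= i (visible) and dtype-min for future positions.
print(mask)
```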
113
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
114
+ class InternLM2RMSNorm(nn.Module):
115
+ def __init__(self, hidden_size, eps=1e-6):
116
+ """
117
+ InternLM2RMSNorm is equivalent to T5LayerNorm
118
+ """
119
+ super().__init__()
120
+ self.weight = nn.Parameter(torch.ones(hidden_size))
121
+ self.variance_epsilon = eps
122
+
123
+ def forward(self, hidden_states):
124
+ input_dtype = hidden_states.dtype
125
+ hidden_states = hidden_states.to(torch.float32)
126
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
127
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
128
+ return self.weight * hidden_states.to(input_dtype)
129
+
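In closed form the forward pass above is y = w * x / sqrt(mean(x^2) + eps), computed in fp32 and cast back to the input dtype. A reference one-liner:

```python
import torch

# Reference computation matching InternLM2RMSNorm.forward on a toy input.
x = torch.randn(2, 5, dtype=torch.float16)
weight, eps = torch.ones(5), 1e-6
xf = x.float()
y = (weight * xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + eps)).to(x.dtype)
```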
130
+
131
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
132
+ class InternLM2RotaryEmbedding(nn.Module):
133
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
134
+ super().__init__()
135
+
136
+ self.dim = dim
137
+ self.max_position_embeddings = max_position_embeddings
138
+ self.base = base
139
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
140
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
141
+
142
+ # Build here to make `torch.jit.trace` work.
143
+ self._set_cos_sin_cache(
144
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
145
+ )
146
+
147
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
148
+ self.max_seq_len_cached = seq_len
149
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
150
+
151
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
153
+ emb = torch.cat((freqs, freqs), dim=-1)
154
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
155
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
156
+
157
+ def forward(self, x, seq_len=None):
158
+ # x: [bs, num_attention_heads, seq_len, head_size]
159
+ if seq_len > self.max_seq_len_cached:
160
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
161
+
162
+ return (
163
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
164
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
165
+ )
166
+
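The cache built above stores cos/sin of the outer product of positions and inverse frequencies; a miniature version with `dim=4` and three positions:

```python
import torch

# Rebuild the cos/sin cache for head_dim=4, seq_len=3, base=10000.
dim, base, seq_len = 4, 10000, 3
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # (dim/2,)
t = torch.arange(seq_len, dtype=inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, inv_freq)   # (seq_len, dim/2)
emb = torch.cat((freqs, freqs), dim=-1)        # (seq_len, dim)
cos_cached, sin_cached = emb.cos(), emb.sin()  # sliced by position at runtime
```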
167
+
168
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
169
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
170
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
171
+
172
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173
+ self.scaling_factor = scaling_factor
174
+ super().__init__(dim, max_position_embeddings, base, device)
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
179
+ t = t / self.scaling_factor
180
+
181
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
182
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
183
+ emb = torch.cat((freqs, freqs), dim=-1)
184
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
186
+
187
+
188
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
189
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
190
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
191
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
192
+ """
193
+
194
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
195
+ self.scaling_factor = scaling_factor
196
+ super().__init__(dim, max_position_embeddings, base, device)
197
+
198
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
199
+ self.max_seq_len_cached = seq_len
200
+
201
+ if seq_len > self.max_position_embeddings:
202
+ base = self.base * (
203
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
204
+ ) ** (self.dim / (self.dim - 2))
205
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
206
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
207
+
208
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
209
+
210
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
211
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
215
+
216
+
217
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
218
+ def rotate_half(x):
219
+ """Rotates half the hidden dims of the input."""
220
+ x1 = x[..., : x.shape[-1] // 2]
221
+ x2 = x[..., x.shape[-1] // 2 :]
222
+ return torch.cat((-x2, x1), dim=-1)
223
+
224
+
225
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
226
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
227
+ """Applies Rotary Position Embedding to the query and key tensors."""
228
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
229
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
230
+ q_embed = (q * cos) + (rotate_half(q) * sin)
231
+ k_embed = (k * cos) + (rotate_half(k) * sin)
232
+ return q_embed, k_embed
233
+
234
+
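A minimal usage sketch of the two helpers above, assuming this module's names are in scope:

```python
import torch

bsz, n_heads, seq_len, head_dim = 1, 2, 3, 4
q = torch.randn(bsz, n_heads, seq_len, head_dim)
k = torch.randn(bsz, n_heads, seq_len, head_dim)
position_ids = torch.arange(seq_len).unsqueeze(0)  # (1, seq_len)

rope = InternLM2RotaryEmbedding(head_dim)          # defined earlier in this file
cos, sin = rope(k, seq_len=seq_len)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
```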
235
+ class InternLM2MLP(nn.Module):
236
+ def __init__(self, config):
237
+ super().__init__()
238
+ self.config = config
239
+ self.hidden_size = config.hidden_size
240
+ self.intermediate_size = config.intermediate_size
241
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
242
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
243
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
244
+ self.act_fn = ACT2FN[config.hidden_act]
245
+
246
+ def forward(self, x):
247
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
248
+
249
+ return down_proj
250
+
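This is the SwiGLU-style gating used by LLaMA-family models: down = W2(act(W1 x) * W3 x). A standalone sketch, with SiLU assumed as the `hidden_act`:

```python
import torch
import torch.nn.functional as F

# Standalone gated MLP matching InternLM2MLP.forward (SiLU is an assumption).
x = torch.randn(2, 8)
w1 = torch.nn.Linear(8, 16, bias=False)   # gate projection
w3 = torch.nn.Linear(8, 16, bias=False)   # up projection
w2 = torch.nn.Linear(16, 8, bias=False)   # down projection
out = w2(F.silu(w1(x)) * w3(x))
```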
251
+
252
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
253
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
254
+ """
255
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
256
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
257
+ """
258
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
259
+ if n_rep == 1:
260
+ return hidden_states
261
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
262
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
263
+
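As the docstring says, the expand/reshape above is equivalent to `torch.repeat_interleave` along the head dimension; a quick equivalence check:

```python
import torch

kv = torch.randn(1, 2, 3, 4)  # (batch, num_key_value_heads, seqlen, head_dim)
n_rep = 3
expanded = kv[:, :, None, :, :].expand(1, 2, n_rep, 3, 4).reshape(1, 2 * n_rep, 3, 4)
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=n_rep, dim=1))
```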
264
+
265
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
266
+ class InternLM2Attention(nn.Module):
267
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
268
+
269
+ def __init__(self, config: InternLM2Config):
270
+ super().__init__()
271
+ self.config = config
272
+ self.hidden_size = config.hidden_size
273
+ self.num_heads = config.num_attention_heads
274
+ self.head_dim = self.hidden_size // self.num_heads
275
+ self.num_key_value_heads = config.num_key_value_heads
276
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277
+ self.max_position_embeddings = config.max_position_embeddings
278
+ self.is_causal = True
279
+
280
+ if (self.head_dim * self.num_heads) != self.hidden_size:
281
+ raise ValueError(
282
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
283
+ f" and `num_heads`: {self.num_heads})."
284
+ )
285
+
286
+ self.wqkv = nn.Linear(
287
+ self.hidden_size,
288
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
289
+ bias=config.bias,
290
+ )
291
+
292
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
293
+ self._init_rope()
294
+
295
+ def _init_rope(self):
296
+ if self.config.rope_scaling is None:
297
+ self.rotary_emb = InternLM2RotaryEmbedding(
298
+ self.head_dim,
299
+ max_position_embeddings=self.max_position_embeddings,
300
+ base=self.config.rope_theta,
301
+ )
302
+ else:
303
+ scaling_type = self.config.rope_scaling["type"]
304
+ scaling_factor = self.config.rope_scaling["factor"]
305
+ if scaling_type == "dynamic":
306
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
307
+ self.head_dim,
308
+ max_position_embeddings=self.max_position_embeddings,
309
+ base=self.config.rope_theta,
310
+ scaling_factor=scaling_factor,
311
+ )
312
+ elif scaling_type == "linear":
313
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
314
+ self.head_dim,
315
+ max_position_embeddings=self.max_position_embeddings,
316
+ base=self.config.rope_theta,
317
+ scaling_factor=scaling_factor,
318
+ )
319
+ else:
320
+ raise ValueError("Currently we only support rotary embeddings of type 'linear' or 'dynamic'.")
321
+ return self.rotary_emb
322
+
323
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
324
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
325
+
326
+ def forward(
327
+ self,
328
+ hidden_states: torch.Tensor,
329
+ attention_mask: Optional[torch.Tensor] = None,
330
+ position_ids: Optional[torch.LongTensor] = None,
331
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
332
+ output_attentions: bool = False,
333
+ use_cache: bool = False,
334
+ **kwargs,
335
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
336
+ if "padding_mask" in kwargs:
337
+ warnings.warn(
338
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
339
+ "Please make sure to use `attention_mask` instead."
340
+ )
341
+
342
+ bsz, q_len, _ = hidden_states.size()
343
+
344
+ qkv_states = self.wqkv(hidden_states)
345
+
346
+ qkv_states = rearrange(
347
+ qkv_states,
348
+ "b q (h gs d) -> b q h gs d",
349
+ gs=2 + self.num_key_value_groups,
350
+ d=self.head_dim,
351
+ )
352
+
353
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
354
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
355
+ key_states = qkv_states[..., -2, :]
356
+ value_states = qkv_states[..., -1, :]
357
+
358
+ query_states = query_states.transpose(1, 2)
359
+ key_states = key_states.transpose(1, 2)
360
+ value_states = value_states.transpose(1, 2)
361
+
362
+ kv_seq_len = key_states.shape[-2]
363
+ if past_key_value is not None:
364
+ kv_seq_len += past_key_value[0].shape[-2]
365
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
366
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
367
+
368
+ if past_key_value is not None:
369
+ # reuse k, v, self_attention
370
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
371
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
372
+
373
+ past_key_value = (key_states, value_states) if use_cache else None
374
+
375
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
376
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
377
+
378
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
379
+
380
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
381
+ raise ValueError(
382
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
383
+ f" {attn_weights.size()}"
384
+ )
385
+
386
+ if attention_mask is not None:
387
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
388
+ raise ValueError(
389
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
390
+ )
391
+ attn_weights = attn_weights + attention_mask
392
+
393
+ # upcast attention to fp32
394
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
395
+ attn_output = torch.matmul(attn_weights, value_states)
396
+
397
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
398
+ raise ValueError(
399
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
400
+ f" {attn_output.size()}"
401
+ )
402
+
403
+ attn_output = attn_output.transpose(1, 2).contiguous()
404
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
405
+
406
+ attn_output = self.wo(attn_output)
407
+
408
+ if not output_attentions:
409
+ attn_weights = None
410
+
411
+ return attn_output, attn_weights, past_key_value
412
+
413
+
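The `wqkv` projection packs the heads group-wise: each of the `num_key_value_heads` groups carries `num_key_value_groups` query heads followed by one key head and one value head, which is why `gs = 2 + num_key_value_groups` and key/value sit at indices -2 and -1. A shape-only sketch:

```python
import torch
from einops import rearrange

bsz, q_len = 1, 5
num_heads, num_kv_heads, head_dim = 8, 2, 4
groups = num_heads // num_kv_heads  # query heads per kv head

qkv = torch.randn(bsz, q_len, (num_heads + 2 * num_kv_heads) * head_dim)
qkv = rearrange(qkv, "b q (h gs d) -> b q h gs d", gs=2 + groups, d=head_dim)

q = rearrange(qkv[..., :groups, :], "b q h gs d -> b q (h gs) d")  # (1, 5, 8, 4)
k = qkv[..., -2, :]                                                # (1, 5, 2, 4)
v = qkv[..., -1, :]                                                # (1, 5, 2, 4)
```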
414
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
415
+ class InternLM2FlashAttention2(InternLM2Attention):
416
+ """
417
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention`, as the weights of the module stay
418
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
419
+ flash attention and deal with padding tokens in case the input contains any of them.
420
+ """
421
+
422
+ def forward(
423
+ self,
424
+ hidden_states: torch.Tensor,
425
+ attention_mask: Optional[torch.LongTensor] = None,
426
+ position_ids: Optional[torch.LongTensor] = None,
427
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
428
+ output_attentions: bool = False,
429
+ use_cache: bool = False,
430
+ **kwargs,
431
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432
+ # InternLM2FlashAttention2 attention does not support output_attentions
433
+ if "padding_mask" in kwargs:
434
+ warnings.warn(
435
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
436
+ "Please make sure to use `attention_mask` instead."
437
+ )
438
+
439
+ # overwrite attention_mask with padding_mask
440
+ attention_mask = kwargs.pop("padding_mask")
441
+
442
+ output_attentions = False
443
+
444
+ bsz, q_len, _ = hidden_states.size()
445
+
446
+ qkv_states = self.wqkv(hidden_states)
447
+
448
+ qkv_states = rearrange(
449
+ qkv_states,
450
+ "b q (h gs d) -> b q h gs d",
451
+ gs=2 + self.num_key_value_groups,
452
+ d=self.head_dim,
453
+ )
454
+
455
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
456
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
457
+ key_states = qkv_states[..., -2, :]
458
+ value_states = qkv_states[..., -1, :]
459
+
460
+ query_states = query_states.transpose(1, 2)
461
+ key_states = key_states.transpose(1, 2)
462
+ value_states = value_states.transpose(1, 2)
463
+
464
+ kv_seq_len = key_states.shape[-2]
465
+ if past_key_value is not None:
466
+ kv_seq_len += past_key_value[0].shape[-2]
467
+
468
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
469
+
470
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
471
+
472
+ if past_key_value is not None:
473
+ # reuse k, v, self_attention
474
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
475
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
476
+
477
+ past_key_value = (key_states, value_states) if use_cache else None
478
+
479
+ query_states = query_states.transpose(1, 2)
480
+ key_states = key_states.transpose(1, 2)
481
+ value_states = value_states.transpose(1, 2)
482
+
483
+ attn_output = self._flash_attention_forward(
484
+ query_states, key_states, value_states, attention_mask, q_len
485
+ )
486
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
487
+ attn_output = self.wo(attn_output)
488
+
489
+ if not output_attentions:
490
+ attn_weights = None
491
+
492
+ return attn_output, attn_weights, past_key_value
493
+
494
+ def _flash_attention_forward(
495
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
496
+ ):
497
+ """
498
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
499
+ first unpads the input, then computes the attention scores, and pads the final attention output.
500
+
501
+ Args:
502
+ query_states (`torch.Tensor`):
503
+ Input query states to be passed to Flash Attention API
504
+ key_states (`torch.Tensor`):
505
+ Input key states to be passed to Flash Attention API
506
+ value_states (`torch.Tensor`):
507
+ Input value states to be passed to Flash Attention API
508
+ attention_mask (`torch.Tensor`):
509
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
510
+ position of padding tokens and 1 for the position of non-padding tokens.
511
+ dropout (`float`, *optional*):
512
+ Attention dropout
513
+ softmax_scale (`float`, *optional*):
514
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
515
+ """
516
+ # Contains at least one padding token in the sequence
517
+ causal = self.is_causal and query_length != 1
518
+ if attention_mask is not None:
519
+ batch_size = query_states.shape[0]
520
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
521
+ query_states, key_states, value_states, attention_mask, query_length
522
+ )
523
+
524
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
525
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
526
+
527
+ attn_output_unpad = flash_attn_varlen_func(
528
+ query_states,
529
+ key_states,
530
+ value_states,
531
+ cu_seqlens_q=cu_seqlens_q,
532
+ cu_seqlens_k=cu_seqlens_k,
533
+ max_seqlen_q=max_seqlen_in_batch_q,
534
+ max_seqlen_k=max_seqlen_in_batch_k,
535
+ dropout_p=dropout,
536
+ softmax_scale=softmax_scale,
537
+ causal=causal,
538
+ )
539
+
540
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
541
+ else:
542
+ attn_output = flash_attn_func(
543
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
544
+ )
545
+
546
+ return attn_output
547
+
548
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
549
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
550
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
551
+
552
+ key_layer = index_first_axis(
553
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
554
+ )
555
+ value_layer = index_first_axis(
556
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
557
+ )
558
+
559
+ if query_length == kv_seq_len:
560
+ query_layer = index_first_axis(
561
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
562
+ )
563
+ cu_seqlens_q = cu_seqlens_k
564
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
565
+ indices_q = indices_k
566
+ elif query_length == 1:
567
+ max_seqlen_in_batch_q = 1
568
+ cu_seqlens_q = torch.arange(
569
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
570
+ ) # There is a memcpy here, that is very bad.
571
+ indices_q = cu_seqlens_q[:-1]
572
+ query_layer = query_layer.squeeze(1)
573
+ else:
574
+ # The -q_len: slice assumes left padding.
575
+ attention_mask = attention_mask[:, -query_length:]
576
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
577
+
578
+ return (
579
+ query_layer,
580
+ key_layer,
581
+ value_layer,
582
+ indices_q.to(torch.int64),
583
+ (cu_seqlens_q, cu_seqlens_k),
584
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
585
+ )
586
+
587
+ INTERNLM2_ATTENTION_CLASSES = {
588
+ "eager": InternLM2Attention,
589
+ "flash_attention_2": InternLM2FlashAttention2,
590
+ }
591
+
592
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
593
+ class InternLM2DecoderLayer(nn.Module):
594
+ def __init__(self, config: InternLM2Config):
595
+ super().__init__()
596
+ self.hidden_size = config.hidden_size
597
+
598
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
599
+
600
+ self.feed_forward = InternLM2MLP(config)
601
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
602
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
603
+
604
+ def forward(
605
+ self,
606
+ hidden_states: torch.Tensor,
607
+ attention_mask: Optional[torch.Tensor] = None,
608
+ position_ids: Optional[torch.LongTensor] = None,
609
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
610
+ output_attentions: Optional[bool] = False,
611
+ use_cache: Optional[bool] = False,
612
+ **kwargs,
613
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
614
+ """
615
+ Args:
616
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
617
+ attention_mask (`torch.FloatTensor`, *optional*):
618
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
619
+ query_sequence_length, key_sequence_length)` if default attention is used.
620
+ output_attentions (`bool`, *optional*):
621
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
622
+ returned tensors for more detail.
623
+ use_cache (`bool`, *optional*):
624
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
625
+ (see `past_key_values`).
626
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
627
+ """
628
+ if "padding_mask" in kwargs:
629
+ warnings.warn(
630
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
631
+ "Please make sure to use `attention_mask` instead."
632
+ )
633
+
634
+ residual = hidden_states
635
+
636
+ hidden_states = self.attention_norm(hidden_states)
637
+
638
+ # Self Attention
639
+ hidden_states, self_attn_weights, present_key_value = self.attention(
640
+ hidden_states=hidden_states,
641
+ attention_mask=attention_mask,
642
+ position_ids=position_ids,
643
+ past_key_value=past_key_value,
644
+ output_attentions=output_attentions,
645
+ use_cache=use_cache,
646
+ **kwargs,
647
+ )
648
+ hidden_states = residual + hidden_states
649
+
650
+ # Fully Connected
651
+ residual = hidden_states
652
+ hidden_states = self.ffn_norm(hidden_states)
653
+ hidden_states = self.feed_forward(hidden_states)
654
+ hidden_states = residual + hidden_states
655
+
656
+ outputs = (hidden_states,)
657
+
658
+ if output_attentions:
659
+ outputs += (self_attn_weights,)
660
+
661
+ if use_cache:
662
+ outputs += (present_key_value,)
663
+
664
+ return outputs
665
+
666
+
667
+ InternLM2_START_DOCSTRING = r"""
668
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
669
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
670
+ etc.)
671
+
672
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
673
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
674
+ and behavior.
675
+
676
+ Parameters:
677
+ config ([`InternLM2Config`]):
678
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
679
+ load the weights associated with the model, only the configuration. Check out the
680
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
681
+ """
682
+
683
+
684
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
685
+ @add_start_docstrings(
686
+ "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
687
+ InternLM2_START_DOCSTRING,
688
+ )
689
+ class InternLM2PreTrainedModel(PreTrainedModel):
690
+ config_class = InternLM2Config
691
+ base_model_prefix = "model"
692
+ supports_gradient_checkpointing = True
693
+ _no_split_modules = ["InternLM2DecoderLayer"]
694
+ _skip_keys_device_placement = "past_key_values"
695
+
696
+ def _init_weights(self, module):
697
+ std = self.config.initializer_range
698
+ if isinstance(module, nn.Linear):
699
+ module.weight.data.normal_(mean=0.0, std=std)
700
+ if module.bias is not None:
701
+ module.bias.data.zero_()
702
+ elif isinstance(module, nn.Embedding):
703
+ module.weight.data.normal_(mean=0.0, std=std)
704
+ if module.padding_idx is not None:
705
+ module.weight.data[module.padding_idx].zero_()
706
+
707
+
708
+ InternLM2_INPUTS_DOCSTRING = r"""
709
+ Args:
710
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
711
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
712
+ it.
713
+
714
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
715
+ [`PreTrainedTokenizer.__call__`] for details.
716
+
717
+ [What are input IDs?](../glossary#input-ids)
718
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
719
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
720
+
721
+ - 1 for tokens that are **not masked**,
722
+ - 0 for tokens that are **masked**.
723
+
724
+ [What are attention masks?](../glossary#attention-mask)
725
+
726
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
727
+ [`PreTrainedTokenizer.__call__`] for details.
728
+
729
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
730
+ `past_key_values`).
731
+
732
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
733
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
734
+ information on the default strategy.
735
+
736
+ - 1 indicates the head is **not masked**,
737
+ - 0 indicates the head is **masked**.
738
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
739
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
740
+ config.n_positions - 1]`.
741
+
742
+ [What are position IDs?](../glossary#position-ids)
743
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
744
+ when `config.use_cache=True`):
745
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
746
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
747
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
748
+
749
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
750
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
751
+
752
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
753
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
754
+ of shape `(batch_size, sequence_length)`.
755
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
756
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
757
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
758
+ model's internal embedding lookup matrix.
759
+ use_cache (`bool`, *optional*):
760
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
761
+ `past_key_values`).
762
+ output_attentions (`bool`, *optional*):
763
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
764
+ tensors for more detail.
765
+ output_hidden_states (`bool`, *optional*):
766
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
767
+ more detail.
768
+ return_dict (`bool`, *optional*):
769
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
770
+ """
771
+
772
+
773
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
774
+ @add_start_docstrings(
775
+ "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
776
+ InternLM2_START_DOCSTRING,
777
+ )
778
+ class InternLM2Model(InternLM2PreTrainedModel):
779
+ """
780
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
781
+
782
+ Args:
783
+ config: InternLM2Config
784
+ """
785
+
786
+ _auto_class = "AutoModel"
787
+
788
+ def __init__(self, config: InternLM2Config):
789
+ super().__init__(config)
790
+ self.padding_idx = config.pad_token_id
791
+ self.vocab_size = config.vocab_size
792
+ self.config = config
793
+
794
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
795
+
796
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
797
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
798
+
799
+ self.gradient_checkpointing = False
800
+ # Initialize weights and apply final processing
801
+ self.post_init()
802
+
803
+ def get_input_embeddings(self):
804
+ return self.tok_embeddings
805
+
806
+ def set_input_embeddings(self, value):
807
+ self.tok_embeddings = value
808
+
809
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
810
+ # create causal mask
811
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
812
+ combined_attention_mask = None
813
+ if input_shape[-1] > 1:
814
+ combined_attention_mask = _make_causal_mask(
815
+ input_shape,
816
+ inputs_embeds.dtype,
817
+ device=inputs_embeds.device,
818
+ past_key_values_length=past_key_values_length,
819
+ )
820
+
821
+ if attention_mask is not None:
822
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
823
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
824
+ inputs_embeds.device
825
+ )
826
+ combined_attention_mask = (
827
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
828
+ )
829
+
830
+ return combined_attention_mask
831
+
832
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
833
+ def forward(
834
+ self,
835
+ input_ids: torch.LongTensor = None,
836
+ attention_mask: Optional[torch.Tensor] = None,
837
+ position_ids: Optional[torch.LongTensor] = None,
838
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
839
+ inputs_embeds: Optional[torch.FloatTensor] = None,
840
+ use_cache: Optional[bool] = None,
841
+ output_attentions: Optional[bool] = None,
842
+ output_hidden_states: Optional[bool] = None,
843
+ return_dict: Optional[bool] = None,
844
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
845
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
846
+ output_hidden_states = (
847
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
848
+ )
849
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
850
+
851
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
852
+
853
+ if self.config.attn_implementation == "flash_attention_2":
854
+ _import_flash_attn()
855
+
856
+ # retrieve input_ids and inputs_embeds
857
+ if input_ids is not None and inputs_embeds is not None:
858
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
859
+ elif input_ids is not None:
860
+ batch_size, seq_length = input_ids.shape[:2]
861
+ elif inputs_embeds is not None:
862
+ batch_size, seq_length = inputs_embeds.shape[:2]
863
+ else:
864
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
865
+
866
+ seq_length_with_past = seq_length
867
+ past_key_values_length = 0
868
+ if past_key_values is not None:
869
+ past_key_values_length = past_key_values[0][0].shape[2]
870
+ seq_length_with_past = seq_length_with_past + past_key_values_length
871
+
872
+ if position_ids is None:
873
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
874
+ position_ids = torch.arange(
875
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
876
+ )
877
+ position_ids = position_ids.unsqueeze(0)
878
+
879
+ if inputs_embeds is None:
880
+ inputs_embeds = self.tok_embeddings(input_ids)
881
+
882
+ if self.config.attn_implementation == "flash_attention_2":
883
+ # 2d mask is passed through the layers
884
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
885
+ else:
886
+ if attention_mask is None:
887
+ attention_mask = torch.ones(
888
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
889
+ )
890
+ attention_mask = self._prepare_decoder_attention_mask(
891
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
892
+ )
893
+
894
+ # embed positions
895
+ hidden_states = inputs_embeds
896
+
897
+ if self.gradient_checkpointing and self.training:
898
+ if use_cache:
899
+ logger.warning_once(
900
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
901
+ )
902
+ use_cache = False
903
+
904
+ # decoder layers
905
+ all_hidden_states = () if output_hidden_states else None
906
+ all_self_attns = () if output_attentions else None
907
+ next_decoder_cache = () if use_cache else None
908
+
909
+ for idx, decoder_layer in enumerate(self.layers):
910
+ if output_hidden_states:
911
+ all_hidden_states += (hidden_states,)
912
+
913
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
914
+
915
+ if self.gradient_checkpointing and self.training:
916
+
917
+ def create_custom_forward(module):
918
+ def custom_forward(*inputs):
919
+ # None for past_key_value
920
+ return module(*inputs, output_attentions, None)
921
+
922
+ return custom_forward
923
+
924
+ layer_outputs = torch.utils.checkpoint.checkpoint(
925
+ create_custom_forward(decoder_layer),
926
+ hidden_states,
927
+ attention_mask,
928
+ position_ids,
929
+ None,
930
+ )
931
+ else:
932
+ layer_outputs = decoder_layer(
933
+ hidden_states,
934
+ attention_mask=attention_mask,
935
+ position_ids=position_ids,
936
+ past_key_value=past_key_value,
937
+ output_attentions=output_attentions,
938
+ use_cache=use_cache,
939
+ )
940
+
941
+ hidden_states = layer_outputs[0]
942
+
943
+ if use_cache:
944
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
945
+
946
+ if output_attentions:
947
+ all_self_attns += (layer_outputs[1],)
948
+
949
+ hidden_states = self.norm(hidden_states)
950
+
951
+ # add hidden states from the last decoder layer
952
+ if output_hidden_states:
953
+ all_hidden_states += (hidden_states,)
954
+
955
+ next_cache = next_decoder_cache if use_cache else None
956
+ if not return_dict:
957
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
958
+ return BaseModelOutputWithPast(
959
+ last_hidden_state=hidden_states,
960
+ past_key_values=next_cache,
961
+ hidden_states=all_hidden_states,
962
+ attentions=all_self_attns,
963
+ )
964
+
965
+
966
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
967
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
968
+ _auto_class = "AutoModelForCausalLM"
969
+
970
+ _tied_weights_keys = ["output.weight"]
971
+
972
+ def __init__(self, config):
973
+ super().__init__(config)
974
+ self.model = InternLM2Model(config)
975
+ self.vocab_size = config.vocab_size
976
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
977
+
978
+ # Initialize weights and apply final processing
979
+ self.post_init()
980
+
981
+ def get_input_embeddings(self):
982
+ return self.model.tok_embeddings
983
+
984
+ def set_input_embeddings(self, value):
985
+ self.model.tok_embeddings = value
986
+
987
+ def get_output_embeddings(self):
988
+ return self.output
989
+
990
+ def set_output_embeddings(self, new_embeddings):
991
+ self.output = new_embeddings
992
+
993
+ def set_decoder(self, decoder):
994
+ self.model = decoder
995
+
996
+ def get_decoder(self):
997
+ return self.model
998
+
999
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1000
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1001
+ def forward(
1002
+ self,
1003
+ input_ids: torch.LongTensor = None,
1004
+ attention_mask: Optional[torch.Tensor] = None,
1005
+ position_ids: Optional[torch.LongTensor] = None,
1006
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1007
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1008
+ labels: Optional[torch.LongTensor] = None,
1009
+ use_cache: Optional[bool] = None,
1010
+ output_attentions: Optional[bool] = None,
1011
+ output_hidden_states: Optional[bool] = None,
1012
+ return_dict: Optional[bool] = None,
1013
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1014
+ r"""
1015
+ Args:
1016
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1017
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1018
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1019
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1020
+
1021
+ Returns:
1022
+
1023
+ Example:
1024
+
1025
+ ```python
1026
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1027
+
1028
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1029
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1030
+
1031
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1032
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1033
+
1034
+ >>> # Generate
1035
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1036
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1037
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1038
+ ```"""
1039
+
1040
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1041
+ output_hidden_states = (
1042
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1043
+ )
1044
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1045
+
1046
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1047
+ outputs = self.model(
1048
+ input_ids=input_ids,
1049
+ attention_mask=attention_mask,
1050
+ position_ids=position_ids,
1051
+ past_key_values=past_key_values,
1052
+ inputs_embeds=inputs_embeds,
1053
+ use_cache=use_cache,
1054
+ output_attentions=output_attentions,
1055
+ output_hidden_states=output_hidden_states,
1056
+ return_dict=return_dict,
1057
+ )
1058
+
1059
+ hidden_states = outputs[0]
1060
+ logits = self.output(hidden_states)
1061
+ logits = logits.float()
1062
+
1063
+ loss = None
1064
+ if labels is not None:
1065
+ # Shift so that tokens < n predict n
1066
+ shift_logits = logits[..., :-1, :].contiguous()
1067
+ shift_labels = labels[..., 1:].contiguous()
1068
+ # Flatten the tokens
1069
+ loss_fct = CrossEntropyLoss()
1070
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1071
+ shift_labels = shift_labels.view(-1)
1072
+ # Enable model parallelism
1073
+ shift_labels = shift_labels.to(shift_logits.device)
1074
+ loss = loss_fct(shift_logits, shift_labels)
1075
+
1076
+ if not return_dict:
1077
+ output = (logits,) + outputs[1:]
1078
+ return (loss,) + output if loss is not None else output
1079
+
1080
+ return CausalLMOutputWithPast(
1081
+ loss=loss,
1082
+ logits=logits,
1083
+ past_key_values=outputs.past_key_values,
1084
+ hidden_states=outputs.hidden_states,
1085
+ attentions=outputs.attentions,
1086
+ )
1087
+
1088
+ def prepare_inputs_for_generation(
1089
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1090
+ ):
1091
+ if past_key_values is not None:
1092
+ past_length = past_key_values[0][0].shape[2]
1093
+
1094
+ # Some generation methods already pass only the last input ID
1095
+ if input_ids.shape[1] > past_length:
1096
+ remove_prefix_length = past_length
1097
+ else:
1098
+ # Default to old behavior: keep only final ID
1099
+ remove_prefix_length = input_ids.shape[1] - 1
1100
+
1101
+ input_ids = input_ids[:, remove_prefix_length:]
1102
+
1103
+ position_ids = kwargs.get("position_ids", None)
1104
+ if attention_mask is not None and position_ids is None:
1105
+ # create position_ids on the fly for batch generation
1106
+ position_ids = attention_mask.long().cumsum(-1) - 1
1107
+ position_ids.masked_fill_(attention_mask == 0, 1)
1108
+ if past_key_values:
1109
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1110
+
1111
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1112
+ if inputs_embeds is not None and past_key_values is None:
1113
+ model_inputs = {"inputs_embeds": inputs_embeds}
1114
+ else:
1115
+ model_inputs = {"input_ids": input_ids}
1116
+
1117
+ model_inputs.update(
1118
+ {
1119
+ "position_ids": position_ids,
1120
+ "past_key_values": past_key_values,
1121
+ "use_cache": kwargs.get("use_cache"),
1122
+ "attention_mask": attention_mask,
1123
+ }
1124
+ )
1125
+ return model_inputs
1126
+
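The on-the-fly `position_ids` above assign positions only to the real tokens of a left-padded batch; masked slots get a harmless placeholder:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])      # two pad tokens on the left
position_ids = attention_mask.long().cumsum(-1) - 1   # tensor([[-1, -1, 0, 1, 2]])
position_ids.masked_fill_(attention_mask == 0, 1)     # tensor([[ 1,  1, 0, 1, 2]])
```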
1127
+ @staticmethod
1128
+ def _reorder_cache(past_key_values, beam_idx):
1129
+ reordered_past = ()
1130
+ for layer_past in past_key_values:
1131
+ reordered_past += (
1132
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1133
+ )
1134
+ return reordered_past
1135
+
1136
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
1137
+ prompt = ""
1138
+ if meta_instruction:
1139
+ prompt += f"""<s><|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1140
+ else:
1141
+ prompt += "<s>"
1142
+ for record in history:
1143
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1144
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1145
+ return tokenizer([prompt], return_tensors="pt")
1146
+
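For one history turn, the prompt assembled by `build_inputs` renders as the ChatML-style string below; the query and history values are made up for illustration:

```python
history = [("Hi", "Hello! How can I help?")]
query = "What is InternLM?"
meta_instruction = "You are a helpful assistant."

prompt = f"<s><|im_start|>system\n{meta_instruction}<|im_end|>\n"
for user_msg, assistant_msg in history:
    prompt += (f"<|im_start|>user\n{user_msg}<|im_end|>\n"
               f"<|im_start|>assistant\n{assistant_msg}<|im_end|>\n")
prompt += f"<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"
```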
1147
+ @torch.no_grad()
1148
+ def chat(
1149
+ self,
1150
+ tokenizer,
1151
+ query: str,
1152
+ history: List[Tuple[str, str]] = [],
1153
+ streamer: Optional[BaseStreamer] = None,
1154
+ max_new_tokens: int = 1024,
1155
+ do_sample: bool = True,
1156
+ temperature: float = 0.8,
1157
+ top_p: float = 0.8,
1158
+ meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
1159
+ "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
1160
+ "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.",
1161
+ **kwargs,
1162
+ ):
1163
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1164
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1165
+ # also add the end-of-assistant token to the eos token ids to avoid unnecessary generation
1166
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["<|im_end|>"])[0]]
1167
+ outputs = self.generate(
1168
+ **inputs,
1169
+ streamer=streamer,
1170
+ max_new_tokens=max_new_tokens,
1171
+ do_sample=do_sample,
1172
+ temperature=temperature,
1173
+ top_p=top_p,
1174
+ eos_token_id=eos_token_id,
1175
+ **kwargs,
1176
+ )
1177
+ outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
1178
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1179
+ response = response.split("<|im_end|>")[0]
1180
+ history = history + [(query, response)]
1181
+ return response, history
1182
+
1183
+ @torch.no_grad()
1184
+ def stream_chat(
1185
+ self,
1186
+ tokenizer,
1187
+ query: str,
1188
+ history: List[Tuple[str, str]] = [],
1189
+ max_new_tokens: int = 1024,
1190
+ do_sample: bool = True,
1191
+ temperature: float = 0.8,
1192
+ top_p: float = 0.8,
1193
+ **kwargs,
1194
+ ):
1195
+ """
1196
+ Return a generator yielding (response, history) tuples.
1197
+ E.g.
1198
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1199
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1200
+ """
1201
+ if BaseStreamer is None:
1202
+ raise ModuleNotFoundError(
1203
+ "The version of `transformers` is too low. Please make sure "
1204
+ "that you have installed `transformers>=4.28.0`."
1205
+ )
1206
+
1207
+ response_queue = queue.Queue(maxsize=20)
1208
+
1209
+ class ChatStreamer(BaseStreamer):
1210
+ def __init__(self, tokenizer) -> None:
1211
+ super().__init__()
1212
+ self.tokenizer = tokenizer
1213
+ self.queue = response_queue
1214
+ self.query = query
1215
+ self.history = history
1216
+ self.response = ""
1217
+ self.received_inputs = False
1218
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1219
+
1220
+ def put(self, value):
1221
+ if len(value.shape) > 1 and value.shape[0] > 1:
1222
+ raise ValueError("ChatStreamer only supports batch size 1")
1223
+ elif len(value.shape) > 1:
1224
+ value = value[0]
1225
+
1226
+ if not self.received_inputs:
1227
+ # The first received value is input_ids, ignore here
1228
+ self.received_inputs = True
1229
+ return
1230
+
1231
+ token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
1232
+ if token.strip() != "<|im_end|>":
1233
+ self.response = self.response + token
1234
+ history = self.history + [(self.query, self.response)]
1235
+ self.queue.put((self.response, history))
1236
+
1237
+ def end(self):
1238
+ self.queue.put(None)
1239
+
1240
+ def stream_producer():
1241
+ return self.chat(
1242
+ tokenizer=tokenizer,
1243
+ query=query,
1244
+ streamer=ChatStreamer(tokenizer=tokenizer),
1245
+ history=history,
1246
+ max_new_tokens=max_new_tokens,
1247
+ do_sample=do_sample,
1248
+ temperature=temperature,
1249
+ top_p=top_p,
1250
+ **kwargs,
1251
+ )
1252
+
1253
+ def consumer():
1254
+ producer = threading.Thread(target=stream_producer)
1255
+ producer.start()
1256
+ while True:
1257
+ res = response_queue.get()
1258
+ if res is None:
1259
+ return
1260
+ yield res
1261
+
1262
+ return consumer()
1263
+
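Consuming the generator looks like this (loading `model` and `tokenizer` is assumed to have happened elsewhere):

```python
# Each yielded item is the full response so far plus the updated history.
for response, history in model.stream_chat(tokenizer, "你好"):
    print(response, end="\r")
```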
1264
+
1265
+ # Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1266
+ @add_start_docstrings(
1267
+ """
1268
+ The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1269
+
1270
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1271
+ as other causal models (e.g. GPT-2) do.
1272
+
1273
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1274
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1275
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+    each row of the batch).
+    """,
+    InternLM2_START_DOCSTRING,
+)
+class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.model = InternLM2Model(config)
+        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.tok_embeddings
+
+    def set_input_embeddings(self, value):
+        self.model.tok_embeddings = value
+
+    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        transformer_outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+        logits = self.score(hidden_states)
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]
+        else:
+            batch_size = inputs_embeds.shape[0]
+
+        if self.config.pad_token_id is None and batch_size != 1:
+            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+        if self.config.pad_token_id is None:
+            sequence_lengths = -1
+        else:
+            if input_ids is not None:
+                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
+                    logits.device
+                )
+            else:
+                sequence_lengths = -1
+
+        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+        loss = None
+        if labels is not None:
+            labels = labels.to(logits.device)
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(pooled_logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(pooled_logits, labels)
+        if not return_dict:
+            output = (pooled_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutputWithPast(
+            loss=loss,
+            logits=pooled_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
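Note: the classifier pools the logit of the last non-pad token in each row. A minimal sketch of that indexing in isolation, with toy shapes and pad_token_id=2 assumed purely for illustration:

    import torch

    pad_token_id = 2  # assumed for this sketch
    input_ids = torch.tensor([[5, 6, 7, 2, 2],
                              [8, 9, 2, 2, 2]])  # two right-padded rows
    logits = torch.randn(2, 5, 3)                # (batch, seq_len, num_labels)
    # first pad position minus one == index of the last real token, as in forward()
    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    pooled = logits[torch.arange(2), sequence_lengths]
    # sequence_lengths -> tensor([2, 1]); pooled.shape -> torch.Size([2, 3])

If a row contains no pad token, argmax returns 0 and the index wraps to -1, i.e. the last position, matching the documented fallback above.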
model_repository/preprocessing/1/tokenizer/placeholder ADDED
File without changes
model_repository/preprocessing/1/tokenizer/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,554 @@
+{
+  "metadata": {
+    "total_size": 5251801088
+  },
+  "weight_map": {
+    "model.layers.0.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.0.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.1.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.10.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.10.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.11.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.12.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.13.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.14.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.15.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.16.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.17.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.18.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.19.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.2.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.2.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.20.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.20.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.21.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.22.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.23.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.24.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.25.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.attention_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w2.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w2.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w2.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w3.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w3.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.feed_forward.w3.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.26.ffn_norm.weight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wo.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wo.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wo.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wqkv.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wqkv.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention.wqkv.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w1.qweight": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.feed_forward.w1.qzeros": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.feed_forward.w1.scales": "pytorch_model-00002-of-00003.bin",
+    "model.layers.27.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.27.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.28.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.29.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.3.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.3.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.30.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.30.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wo.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wo.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wo.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wqkv.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wqkv.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention.wqkv.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.attention_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w1.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w1.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w1.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w2.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w2.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w2.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w3.qweight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w3.qzeros": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.feed_forward.w3.scales": "pytorch_model-00003-of-00003.bin",
+    "model.layers.31.ffn_norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.layers.4.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.4.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.5.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.6.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.7.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.8.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wo.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wo.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wo.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wqkv.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wqkv.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention.wqkv.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.attention_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w1.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w1.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w1.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w2.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w2.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w2.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w3.qweight": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w3.qzeros": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.feed_forward.w3.scales": "pytorch_model-00001-of-00003.bin",
+    "model.layers.9.ffn_norm.weight": "pytorch_model-00001-of-00003.bin",
+    "model.norm.weight": "pytorch_model-00003-of-00003.bin",
+    "model.tok_embeddings.weight": "pytorch_model-00001-of-00003.bin",
+    "output.weight": "pytorch_model-00003-of-00003.bin"
+  }
+}
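The index above maps every tensor to one of three shard files. A minimal loading sketch, assuming the shard files sit next to the index (this loader is illustrative, not part of the committed code):

    import json
    from collections import defaultdict
    import torch

    with open('pytorch_model.bin.index.json') as f:
        index = json.load(f)
    # group tensor names by shard so each shard file is read only once
    per_shard = defaultdict(list)
    for name, shard in index['weight_map'].items():
        per_shard[shard].append(name)
    state_dict = {}
    for shard, names in per_shard.items():
        part = torch.load(shard, map_location='cpu')
        state_dict.update({n: part[n] for n in names})
    assert len(state_dict) == len(index['weight_map'])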
model_repository/preprocessing/1/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
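Note that the pad token reuses the eos token `</s>`, matching the defaults in tokenization_internlm.py below. A quick sanity check, assuming a transformers environment and this repo's local path (both are assumptions of this sketch):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(
        'model_repository/preprocessing/1/tokenizer', trust_remote_code=True)
    assert tok.pad_token == tok.eos_token == '</s>'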
model_repository/preprocessing/1/tokenizer/tokenization_internlm.py ADDED
@@ -0,0 +1,240 @@
1
+ # coding=utf-8
2
+ # Copyright (c) InternLM. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ """Tokenization classes for IntermLM."""
22
+ import os
23
+ from shutil import copyfile
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ import sentencepiece as spm
27
+ from transformers.tokenization_utils import PreTrainedTokenizer
28
+ from transformers.utils import logging
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
33
+
34
+ PRETRAINED_VOCAB_FILES_MAP = {}
35
+
36
+
37
+ class InternLMTokenizer(PreTrainedTokenizer):
38
+ """
39
+ Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ Path to the vocabulary file.
44
+ """
45
+
46
+ vocab_files_names = VOCAB_FILES_NAMES
47
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
48
+ model_input_names = ["input_ids", "attention_mask"]
49
+ _auto_class = "AutoTokenizer"
50
+
51
+ def __init__(
52
+ self,
53
+ vocab_file,
54
+ unk_token="<unk>",
55
+ bos_token="<s>",
56
+ eos_token="</s>",
57
+ pad_token="</s>",
58
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
59
+ add_bos_token=True,
60
+ add_eos_token=False,
61
+ decode_with_prefix_space=False,
62
+ clean_up_tokenization_spaces=False,
63
+ **kwargs,
64
+ ):
65
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
66
+ self.vocab_file = vocab_file
67
+ self.add_bos_token = add_bos_token
68
+ self.add_eos_token = add_eos_token
69
+ self.decode_with_prefix_space = decode_with_prefix_space
70
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
71
+ self.sp_model.Load(vocab_file)
72
+ self._no_prefix_space_tokens = None
73
+ super().__init__(
74
+ bos_token=bos_token,
75
+ eos_token=eos_token,
76
+ unk_token=unk_token,
77
+ pad_token=pad_token,
78
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
79
+ **kwargs,
80
+ )
81
+
82
+ """ Initialization"""
83
+
84
+ @property
85
+ def no_prefix_space_tokens(self):
86
+ if self._no_prefix_space_tokens is None:
87
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
88
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
89
+ return self._no_prefix_space_tokens
90
+
91
+ @property
92
+ def vocab_size(self):
93
+ """Returns vocab size"""
94
+ return self.sp_model.get_piece_size()
95
+
96
+ @property
97
+ def bos_token_id(self) -> Optional[int]:
98
+ return self.sp_model.bos_id()
99
+
100
+ @property
101
+ def eos_token_id(self) -> Optional[int]:
102
+ return self.sp_model.eos_id()
103
+
104
+ def get_vocab(self):
105
+ """Returns vocab as a dict"""
106
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
107
+ vocab.update(self.added_tokens_encoder)
108
+ return vocab
109
+
110
+ def _tokenize(self, text):
111
+ """Returns a tokenized string."""
112
+ return self.sp_model.encode(text, out_type=str)
113
+
114
+ def _convert_token_to_id(self, token):
115
+ """Converts a token (str) in an id using the vocab."""
116
+ return self.sp_model.piece_to_id(token)
117
+
118
+ def _convert_id_to_token(self, index):
119
+ """Converts an index (integer) in a token (str) using the vocab."""
120
+ token = self.sp_model.IdToPiece(index)
121
+ return token
122
+
123
+ def _maybe_add_prefix_space(self, tokens, decoded):
124
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
125
+ return " " + decoded
126
+ else:
127
+ return decoded
128
+
129
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for token in tokens:
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        out_string = self.clean_up_tokenization(out_string)
+        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+        return out_string[1:]
+
+    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple(str)`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        if self.add_bos_token:
+            bos_token_ids = [self.bos_token_id]
+        else:
+            bos_token_ids = []
+
+        output = bos_token_ids + token_ids_0
+
+        if token_ids_1 is not None:
+            output = output + token_ids_1
+
+        if self.add_eos_token:
+            output = output + [self.eos_token_id]
+
+        return output
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does
+        not make use of token type ids, therefore a list of zeros is returned.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of zeros.
+        """
+        eos = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return len(token_ids_0 + eos) * [0]
+        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
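Note: a minimal sketch of how the special-token helpers above behave, assuming this
directory is loaded as a local pretrained path (trust_remote_code resolves the
auto_map in tokenizer_config.json):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(
        'model_repository/preprocessing/1/tokenizer', trust_remote_code=True)
    ids = tok.encode('hello world')  # build_inputs_with_special_tokens prepends <s>
    mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
    # mask -> [1, 0, 0, ...]: 1 marks the special <s>, 0 the ordinary tokens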
model_repository/preprocessing/1/tokenizer/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+size 1477754
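Note: the three lines above are a Git LFS pointer, not the tokenizer itself. A small
sketch (the helper name is ours) for verifying a fetched blob against the pointer's
oid and size:

    import hashlib
    import os

    def verify_lfs_blob(path, oid_hex, size):
        """Check a downloaded file against a Git LFS pointer's sha256 oid and size."""
        if os.path.getsize(path) != size:
            return False
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                digest.update(chunk)
        return digest.hexdigest() == oid_hex

    # e.g. verify_lfs_blob(
    #     'tokenizer.model',
    #     'f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b',
    #     1477754)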
model_repository/preprocessing/1/tokenizer/tokenizer.py ADDED
@@ -0,0 +1,400 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os
+import os.path as osp
+from collections import deque
+from typing import List, Optional, Sequence, Union
+
+import torch
+
+from lmdeploy.utils import get_logger
+
+# this file will be copied to the triton server; make sure all
+# imports start from the package root lmdeploy
+
+
+class SentencePieceTokenizer:
+    """Tokenizer of sentencepiece.
+
+    Args:
+        model_file (str): the path of the tokenizer model
+    """
+
+    def __init__(self, model_file: str):
+        from sentencepiece import SentencePieceProcessor
+        self.model = SentencePieceProcessor(model_file=model_file)
+        self._prefix_space_tokens = None
+        # for stop words
+        self._maybe_decode_bytes: bool = None
+        # TODO maybe lack a constant.py
+        self._indexes_tokens_deque = deque(maxlen=10)
+        self.max_indexes_num = 5
+        self.logger = get_logger('lmdeploy')
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size()
+
+    @property
+    def bos_token_id(self):
+        """begin-of-sentence token id."""
+        return self.model.bos_id()
+
+    @property
+    def eos_token_id(self):
+        """end-of-sentence token id."""
+        return self.model.eos_id()
+
+    @property
+    def prefix_space_tokens(self):
+        """tokens that start with a prefix space."""
+        if self._prefix_space_tokens is None:
+            vocab = self.model.IdToPiece(list(range(self.vocab_size)))
+            self._prefix_space_tokens = {
+                i
+                for i, tok in enumerate(vocab) if tok.startswith('▁')
+            }
+        return self._prefix_space_tokens
+
+    def _maybe_add_prefix_space(self, tokens, decoded):
+        """maybe add prefix space for incremental decoding."""
+        if len(tokens) and not decoded.startswith(' ') and\
+                tokens[0] in self.prefix_space_tokens:
+            return ' ' + decoded
+        else:
+            return decoded
+
+    def indexes_containing_token(self, token: str):
+        """Return all the possible indexes whose decoding output may contain
+        the input token."""
+        # traversing the vocab is time consuming and can not be accelerated
+        # with multiple threads (computation) or processes (the tokenizer
+        # can't be pickled), so we cache the latest 10 stop words and return
+        # directly on a match
+        for _token, _indexes in self._indexes_tokens_deque:
+            if token == _token:
+                return _indexes
+        if token == ' ':  # ' ' is special
+            token = '▁'
+        vocab = self.model.IdToPiece(list(range(self.vocab_size)))
+        indexes = [i for i, voc in enumerate(vocab) if token in voc]
+        if len(indexes) > self.max_indexes_num:
+            indexes = self.encode(token, add_bos=False)[-1:]
+            self.logger.warning(
+                f'There are too many (>{self.max_indexes_num}) possible '
+                f'indexes that may decode {token}; only {indexes} will be used')
+        self._indexes_tokens_deque.append((token, indexes))
+        return indexes
+
+    def encode(self, s: str, add_bos: bool = True, **kwargs):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        return self.model.Encode(s, add_bos=add_bos, **kwargs)
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: text of the decoded tokens
+        """
+        if isinstance(t, torch.Tensor):
+            t = t.tolist()
+        t = t[offset:]
+        out_string = self.model.Decode(t)
+        if offset:
+            out_string = self._maybe_add_prefix_space(t, out_string)
+        return out_string
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        import addict
+        add_bos = False
+        add_eos = False
+
+        input_ids = self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
+        return addict.Dict(input_ids=input_ids)  # addict exposes `Dict`
+
+
+class HuggingFaceTokenizer:
+    """Tokenizer backed by huggingface transformers.
+
+    Args:
+        model_dir (str): the directory of the tokenizer model
+    """
+
+    def __init__(self, model_dir: str):
+        from transformers import AutoTokenizer
+        model_file = osp.join(model_dir, 'tokenizer.model')
+        backend_tokenizer_file = osp.join(model_dir, 'tokenizer.json')
+        model_file_exists = osp.exists(model_file)
+        self.logger = get_logger('lmdeploy')
+        if not osp.exists(backend_tokenizer_file) and model_file_exists:
+            self.logger.warning(
+                'Can not find tokenizer.json. '
+                'It may take a long time to initialize the tokenizer.')
+        self.model = AutoTokenizer.from_pretrained(model_dir,
+                                                   trust_remote_code=True)
+        self._prefix_space_tokens = None
+        # save tokenizer.json for reuse
+        if not osp.exists(backend_tokenizer_file) and model_file_exists:
+            if hasattr(self.model, 'backend_tokenizer'):
+                if os.access(model_dir, os.W_OK):
+                    self.model.backend_tokenizer.save(backend_tokenizer_file)
+
+        if self.model.eos_token_id is None:
+            generation_config_file = osp.join(model_dir,
+                                              'generation_config.json')
+            if osp.exists(generation_config_file):
+                with open(generation_config_file, 'r') as f:
+                    cfg = json.load(f)
+                    self.model.eos_token_id = cfg['eos_token_id']
+            elif hasattr(self.model, 'eod_id'):  # Qwen remote
+                self.model.eos_token_id = self.model.eod_id
+
+        # for stop words
+        self._vocab_size_with_added: int = None
+        self._maybe_decode_bytes: bool = None
+        # TODO maybe lack a constant.py
+        self._indexes_tokens_deque = deque(maxlen=10)
+        self.max_indexes_num = 5
+        self.token2id = {}
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size
+
+    @property
+    def vocab_size_with_added(self):
+        """vocabulary size, including added vocab."""
+        if self._vocab_size_with_added is not None:
+            return self._vocab_size_with_added
+        self._vocab_size_with_added = len(self.model.get_vocab())
+        return self._vocab_size_with_added
+
+    @property
+    def bos_token_id(self):
+        """begin-of-sentence token id."""
+        return self.model.bos_token_id
+
+    @property
+    def eos_token_id(self):
+        """end-of-sentence token id."""
+        return self.model.eos_token_id
+
+    @property
+    def prefix_space_tokens(self):
+        """tokens that start with a prefix space."""
+        if self._prefix_space_tokens is None:
+            vocab = self.model.convert_ids_to_tokens(
+                list(range(self.vocab_size)))
+            self._prefix_space_tokens = {
+                i
+                for i, tok in enumerate(vocab)
+                if tok.startswith('▁' if isinstance(tok, str) else b' ')
+            }
+        return self._prefix_space_tokens
+
+    def _maybe_add_prefix_space(self, tokens: List[int], decoded: str):
+        """maybe add prefix space for incremental decoding."""
+        if len(tokens) and not decoded.startswith(' ') and\
+                tokens[0] in self.prefix_space_tokens:
+            return ' ' + decoded
+        else:
+            return decoded
+
+    @property
+    def maybe_decode_bytes(self):
+        """Check whether self.model.convert_ids_to_tokens may return non-str
+        (bytes) values."""
+        if self._maybe_decode_bytes is None:
+            self._maybe_decode_bytes = False
+            vocab = self.model.convert_ids_to_tokens(
+                list(range(self.vocab_size)))
+            for tok in vocab:
+                if not isinstance(tok, str):
+                    self._maybe_decode_bytes = True
+                    break
+        return self._maybe_decode_bytes
+
+    def indexes_containing_token(self, token: str):
+        """Return all the possible indexes whose decoding output may contain
+        the input token."""
+        # traversing the vocab is time consuming and can not be accelerated
+        # with multiple threads (computation) or processes (the tokenizer
+        # can't be pickled), so we cache the latest 10 stop words and return
+        # directly on a match
+        for _token, _indexes in self._indexes_tokens_deque:
+            if token == _token:
+                return _indexes
+
+        if self.token2id == {}:
+            # decode is slower than convert_ids_to_tokens
+            if self.maybe_decode_bytes:
+                self.token2id = {
+                    self.model.decode(i): i
+                    for i in range(self.vocab_size)
+                }
+            else:
+                self.token2id = {
+                    self.model.convert_ids_to_tokens(i): i
+                    for i in range(self.vocab_size)
+                }
+        if token == ' ':  # ' ' is special
+            token = '▁'
+        indexes = [i for _token, i in self.token2id.items() if token in _token]
+        if len(indexes) > self.max_indexes_num:
+            indexes = self.encode(token, add_bos=False)[-1:]
+            self.logger.warning(
+                f'There are too many (>{self.max_indexes_num}) possible '
+                f'indexes that may decode {token}; only {indexes} will be used')
+        # there might be token ids that exceed self.vocab_size
+        if len(indexes) == 0:
+            indexes = self.encode(token, False)
+            if len(indexes) != 1:
+                self.logger.warning(
+                    f'The token {token} encodes to {indexes}, whose length is '
+                    'not 1. Currently, it can not be used as a stop word')
+                indexes = []
+        self._indexes_tokens_deque.append((token, indexes))
+        return indexes
+
+    def encode(self, s: str, add_bos: bool = True, **kwargs):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        encoded = self.model.encode(s, **kwargs)
+        if not add_bos:
+            # in the middle of a session
+            if len(encoded) and encoded[0] == self.bos_token_id:
+                encoded = encoded[1:]
+        return encoded
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: text of the decoded tokens
+        """
+        skip_special_tokens = True
+        t = t[offset:]
+        out_string = self.model.decode(t,
+                                       skip_special_tokens=skip_special_tokens)
+        if offset:
+            out_string = self._maybe_add_prefix_space(t, out_string)
+        return out_string
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        add_special_tokens = False
+        return self.model(s, add_special_tokens=add_special_tokens)
+
+
+class Tokenizer:
+    """Tokenize prompts or de-tokenize tokens into texts.
+
+    Args:
+        model_file (str): the path of the tokenizer model or its directory
+    """
+
+    def __init__(self, model_file: str):
+        if model_file.endswith('.model'):
+            model_folder = osp.split(model_file)[0]
+        else:
+            model_folder = model_file
+            model_file = osp.join(model_folder, 'tokenizer.model')
+        tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
+
+        model_file_exists = osp.exists(model_file)
+        config_exists = osp.exists(tokenizer_config_file)
+        use_hf_model = config_exists or not model_file_exists
+        self.logger = get_logger('lmdeploy')
+        if not use_hf_model:
+            self.model = SentencePieceTokenizer(model_file)
+        else:
+            self.model = HuggingFaceTokenizer(model_folder)
+
+    @property
+    def vocab_size(self):
+        """vocabulary size."""
+        return self.model.vocab_size
+
+    @property
+    def bos_token_id(self):
+        """begin-of-sentence token id."""
+        return self.model.bos_token_id
+
+    @property
+    def eos_token_id(self):
+        """end-of-sentence token id."""
+        return self.model.eos_token_id
+
+    def encode(self, s: str, add_bos: bool = True, **kwargs):
+        """Tokenize a prompt.
+
+        Args:
+            s (str): a prompt
+        Returns:
+            list[int]: token ids
+        """
+        return self.model.encode(s, add_bos, **kwargs)
+
+    def decode(self, t: Sequence[int], offset: Optional[int] = None):
+        """De-tokenize.
+
+        Args:
+            t (List[int]): a list of token ids
+            offset (int): for incremental decoding. Defaults to None, which
+                means it is not applied.
+        Returns:
+            str: text of the decoded tokens
+        """
+        return self.model.decode(t, offset)
+
+    def __call__(self, s: Union[str, Sequence[str]]):
+        """Tokenize prompts.
+
+        Args:
+            s (str): prompts
+        Returns:
+            list[int]: token ids
+        """
+        return self.model(s)
+
+    def indexes_containing_token(self, token):
+        """Return all the possible indexes whose decoding output may contain
+        the input token."""
+        encoded = self.encode(token, add_bos=False)
+        if len(encoded) > 1:
+            self.logger.warning(
+                f'The token {token} encodes to {encoded}, whose length is '
+                'greater than 1. Currently, it can not be used as a stop word')
+            return []
+        return self.model.indexes_containing_token(token)
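Note: a minimal usage sketch of the Tokenizer facade above, assuming lmdeploy is
importable and the directory holding tokenizer.py is on sys.path:

    from tokenizer import Tokenizer

    tok = Tokenizer('model_repository/preprocessing/1/tokenizer')
    ids = tok.encode('hello world')   # bos prepended by default
    text = tok.decode(ids)
    # incremental decoding: pass the already-decoded length as `offset` so a
    # leading '▁' token gets its prefix space restored
    tail = tok.decode(ids + tok.encode('again', add_bos=False), offset=len(ids))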
model_repository/preprocessing/1/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,90 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92538": {
+      "content": "<|plugin|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92539": {
+      "content": "<|interpreter|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92540": {
+      "content": "<|action_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92541": {
+      "content": "<|action_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92542": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92543": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_internlm.InternLMTokenizer",
+      null
+    ]
+  },
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "tokenizer_class": "InternLMTokenizer",
+  "unk_token": "<unk>"
+}
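Note: the chat_template above is what turns a message list into the model prompt. A
quick sketch of its output via transformers:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(
        'model_repository/preprocessing/1/tokenizer', trust_remote_code=True)
    prompt = tok.apply_chat_template([{'role': 'user', 'content': 'hi'}],
                                     tokenize=False, add_generation_prompt=True)
    # prompt == '<s><|im_start|>user\nhi<|im_end|>\n<|im_start|>assistant\n'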
model_repository/preprocessing/config.pbtxt ADDED
@@ -0,0 +1,37 @@
+name: "preprocessing"
+backend: "python"
+max_batch_size: 1
+
+input [
+  {
+    name: "QUERY"
+    data_type: TYPE_STRING
+    dims: [ -1 ]
+  }
+]
+output [
+  {
+    name: "INPUT_ID"
+    data_type: TYPE_UINT32
+    dims: [ -1 ]
+  },
+  {
+    name: "REQUEST_INPUT_LEN"
+    data_type: TYPE_UINT32
+    dims: [ 1 ]
+  }
+]
+
+instance_group [
+  {
+    count: 4
+    kind: KIND_CPU
+  }
+]
+
+parameters {
+  key: "tokenizer_path"
+  value: {
+    string_value: "tokenizer/tokenizer.model"
+  }
+}
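Note: a hedged client-side sketch of exercising this config — the endpoint and batch
shape are assumptions; the tensor names and dtypes come from the config above:

    import numpy as np
    import tritonclient.http as httpclient

    client = httpclient.InferenceServerClient('localhost:8000')
    query = np.array([[b'hello world']], dtype=object)   # [batch=1, 1] string tensor
    inp = httpclient.InferInput('QUERY', query.shape, 'BYTES')
    inp.set_data_from_numpy(query)
    res = client.infer('preprocessing', [inp])
    input_id = res.as_numpy('INPUT_ID')                  # TYPE_UINT32 token ids
    input_len = res.as_numpy('REQUEST_INPUT_LEN')        # TYPE_UINT32 length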
model_repository/turbomind/1/placeholder ADDED
File without changes
model_repository/turbomind/1/weights/config.ini ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c8358cd3fffcb86829f6b600bdd0ba77b6147eed572f88700ec4d914db070d6
+size 645
model_repository/turbomind/1/weights/layers.0.attention.w_qkv.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1763929a6e7bbdafdb81d39ebfa08263351ccea12347aa68b292b1b7c458e45
+size 12582912
model_repository/turbomind/1/weights/layers.0.attention.w_qkv.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ed40e83191f5304fd2df93ff5b90ae9a165bbe489af8020e06948fbbb289d7d
+size 786432
model_repository/turbomind/1/weights/layers.0.attention.wo.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6710235be94402052aaaae809e488f433d75d6d33acf546e2d0bf7aae4d8f0f
+size 8388608
model_repository/turbomind/1/weights/layers.0.attention.wo.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c069c91ef3a796ac2e9e0230319fabb6bc8433c68284c6e5ca71baa477a3438
+size 524288
model_repository/turbomind/1/weights/layers.0.attention_norm.weight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde3cfe82d02d87660f40c667186249cd17a5ee5924ab2a3ea0385919a2d0f3b
+size 8192
model_repository/turbomind/1/weights/layers.0.feed_forward.w13.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26bc912102aa2b487baf312f3bfd8f97dc46ba6761c2328bfd3e45581bfbcfd4
+size 58720256
model_repository/turbomind/1/weights/layers.0.feed_forward.w13.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:309c93937a8778e4e4dce879efd1e0673f4bb7701644628abbaa8420e5b24cf0
+size 3670016
model_repository/turbomind/1/weights/layers.0.feed_forward.w2.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d096d08769d4b05f7483b4ed024224e0d4d35772231e757157e69c9c0dc1c6ef
+size 29360128
model_repository/turbomind/1/weights/layers.0.feed_forward.w2.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdb73c0a0f614f1033850266d6ff4311374557a2653e0fa7857f8507ca87058e
+size 1835008
model_repository/turbomind/1/weights/layers.0.ffn_norm.weight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5b414270e0d50fbec62cdab6ecd217c2f688872d5ed7d9f91bb75dfff46651b
+size 8192
model_repository/turbomind/1/weights/layers.0.past_kv_scale.0.weight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25f7250671024d0129c45c3f3d8f57887921d219c280350697d41e9170925c77
+size 16
model_repository/turbomind/1/weights/layers.1.attention.w_qkv.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a125e82d7ee989858902abca2bec9dc3f4ad74008f5307a1e7a635d148c53f3a
+size 12582912
model_repository/turbomind/1/weights/layers.1.attention.w_qkv.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f96d91127194d8a8404809f81602727e59903c86473ee27012bb303f83cdf77
+size 786432
model_repository/turbomind/1/weights/layers.1.attention.wo.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4905342d79812e6bd9d6d993443ee6b30df2f80cef44176d1398dc884c458bad
+size 8388608
model_repository/turbomind/1/weights/layers.1.attention.wo.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c7971bdedd76bbe5630fd97b2badbdd26d22055ffe6fe0374fff051af9feb80
+size 524288
model_repository/turbomind/1/weights/layers.1.attention_norm.weight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d589a6b27b707580d37c4b198dc952071bb1a34967ebd9175f9055ac012bc781
+size 8192
model_repository/turbomind/1/weights/layers.1.feed_forward.w13.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dd761cf75a1f95c5a55a245fbe1a8bca8967be0d7a03dd12108d0be835d7682
+size 58720256
model_repository/turbomind/1/weights/layers.1.feed_forward.w13.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d4fdfeee03517f7896aadab5adec50c8449a2e1bda2f0cf5b8725b26057d1f6
+size 3670016
model_repository/turbomind/1/weights/layers.1.feed_forward.w2.0.qweight ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0c42be27fe2e9f48473b5cc4ec63cd06575ade857ea8699b4bd05eb4f801dc6
+size 29360128
model_repository/turbomind/1/weights/layers.1.feed_forward.w2.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe915a8697f98fe80270d235325b469219fac1c8a4529052fd15f6b1ee8f13e6
+size 1835008