x54-729 committed on
Commit
f17f278
1 Parent(s): 912eeb0

keep internlm2 only

Files changed (2)
  1. configuration_internlm.py +0 -164
  2. tokenization_internlm.py +0 -240
configuration_internlm.py DELETED
@@ -1,164 +0,0 @@
- # coding=utf-8
- # Copyright (c) InternLM. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ InternLM model configuration"""
-
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
-
-
- class InternLMConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
-     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
-     configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 103168):
-             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
-             `inputs_ids` passed when calling [`InternLMModel`].
-         hidden_size (`int`, *optional*, defaults to 4096):
-             Dimension of the hidden representations.
-         intermediate_size (`int`, *optional*, defaults to 11008):
-             Dimension of the MLP representations.
-         num_hidden_layers (`int`, *optional*, defaults to 32):
-             Number of hidden layers in the Transformer encoder.
-         num_attention_heads (`int`, *optional*, defaults to 32):
-             Number of attention heads for each attention layer in the Transformer encoder.
-         num_key_value_heads (`int`, *optional*):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
-             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
-             `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
-             by meanpooling all the original heads within that group. For more details check out [this
-             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
-             `num_attention_heads`.
-         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-             The non-linear activation function (function or string) in the decoder.
-         max_position_embeddings (`int`, *optional*, defaults to 2048):
-             The maximum sequence length that this model might ever be used with. Typically set this to something large
-             just in case (e.g., 512 or 1024 or 2048).
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-             Whether to tie weight embeddings.
-     Example:
-
-     ```python
-     >>> from transformers import InternLMModel, InternLMConfig
-
-     >>> # Initializing an InternLM internlm-7b style configuration
-     >>> configuration = InternLMConfig()
-
-     >>> # Initializing a model from the internlm-7b style configuration
-     >>> model = InternLMModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-     model_type = "internlm"
-     _auto_class = "AutoConfig"
-
-     def __init__(  # pylint: disable=W0102
-         self,
-         vocab_size=103168,
-         hidden_size=4096,
-         intermediate_size=11008,
-         num_hidden_layers=32,
-         num_attention_heads=32,
-         num_key_value_heads=None,
-         hidden_act="silu",
-         max_position_embeddings=2048,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         use_cache=True,
-         pad_token_id=0,
-         bos_token_id=1,
-         eos_token_id=2,
-         tie_word_embeddings=False,
-         bias=True,
-         rope_theta=10000,
-         rope_scaling=None,
-         attn_implementation="eager",
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.bias = bias
-
-         if num_key_value_heads is None:
-             num_key_value_heads = num_attention_heads
-         self.num_key_value_heads = num_key_value_heads
-
-         self.hidden_act = hidden_act
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.use_cache = use_cache
-         self.rope_theta = rope_theta
-         self.rope_scaling = rope_scaling
-         self._rope_scaling_validation()
-
-         self.attn_implementation = attn_implementation
-         if self.attn_implementation is None:
-             self.attn_implementation = "eager"
-         super().__init__(
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs,
-         )
-
-     def _rope_scaling_validation(self):
-         """
-         Validate the `rope_scaling` configuration.
-         """
-         if self.rope_scaling is None:
-             return
-
-         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-             raise ValueError(
-                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
-                 f"got {self.rope_scaling}"
-             )
-         rope_scaling_type = self.rope_scaling.get("type", None)
-         rope_scaling_factor = self.rope_scaling.get("factor", None)
-         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-             raise ValueError(
-                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-             )
-         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
-             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
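For reference, the `rope_scaling` contract enforced by the deleted `_rope_scaling_validation` is easiest to see with a small example. The sketch below is illustrative only: it assumes a local copy of the removed file is still importable as `configuration_internlm` (and that `transformers` is installed); nothing here is added by this commit.

```python
# Minimal sketch of the rope_scaling contract checked by the deleted
# InternLMConfig._rope_scaling_validation (assumes configuration_internlm.py
# from before this commit is available on the import path).
from configuration_internlm import InternLMConfig

# Accepted: a dict with exactly two fields, "type" in {"linear", "dynamic"}
# and "factor" as a float >= 1.0.
cfg = InternLMConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
print(cfg.rope_scaling)  # {'type': 'dynamic', 'factor': 2.0}

# Rejected: an integer factor fails the isinstance(..., float) check.
try:
    InternLMConfig(rope_scaling={"type": "linear", "factor": 2})
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float >= 1, got 2
```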
tokenization_internlm.py DELETED
@@ -1,240 +0,0 @@
- # coding=utf-8
- # Copyright (c) InternLM. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """Tokenization classes for InternLM."""
- import os
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple
-
- import sentencepiece as spm
- from transformers.tokenization_utils import PreTrainedTokenizer
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
-
- PRETRAINED_VOCAB_FILES_MAP = {}
-
-
- class InternLMTokenizer(PreTrainedTokenizer):
-     """
-     Construct an InternLM tokenizer. Based on SentencePiece.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-     model_input_names = ["input_ids", "attention_mask"]
-     _auto_class = "AutoTokenizer"
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token="<unk>",
-         bos_token="<s>",
-         eos_token="</s>",
-         pad_token="</s>",
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=True,
-         add_eos_token=False,
-         decode_with_prefix_space=False,
-         clean_up_tokenization_spaces=False,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-         self.decode_with_prefix_space = decode_with_prefix_space
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-         self._no_prefix_space_tokens = None
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             **kwargs,
-         )
-
-     """ Initialization"""
-
-     @property
-     def no_prefix_space_tokens(self):
-         if self._no_prefix_space_tokens is None:
-             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
-             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
-         return self._no_prefix_space_tokens
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     @property
-     def bos_token_id(self) -> Optional[int]:
-         return self.sp_model.bos_id()
-
-     @property
-     def eos_token_id(self) -> Optional[int]:
-         return self.sp_model.eos_id()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def _tokenize(self, text):
-         """Returns a tokenized string."""
-         return self.sp_model.encode(text, out_type=str)
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def _maybe_add_prefix_space(self, tokens, decoded):
-         if tokens and tokens[0] not in self.no_prefix_space_tokens:
-             return " " + decoded
-         else:
-             return decoded
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (string) into a single string."""
-         current_sub_tokens = []
-         out_string = ""
-         prev_is_special = False
-         for token in tokens:
-             # make sure that special tokens are not decoded using sentencepiece model
-             if token in self.all_special_tokens:
-                 if not prev_is_special:
-                     out_string += " "
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         out_string = self.clean_up_tokenization(out_string)
-         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
-         return out_string[1:]
-
-     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
-         """
-         Save the vocabulary and special tokens file to a directory.
-
-         Args:
-             save_directory (`str`):
-                 The directory in which to save the vocabulary.
-
-         Returns:
-             `Tuple(str)`: Paths to the files saved.
-         """
-         if not os.path.isdir(save_directory):
-             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-             return
-         out_vocab_file = os.path.join(
-             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-         )
-
-         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-         elif not os.path.isfile(self.vocab_file):
-             with open(out_vocab_file, "wb") as fi:
-                 content_spiece_model = self.sp_model.serialized_model_proto()
-                 fi.write(content_spiece_model)
-
-         return (out_vocab_file,)
-
-     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-         if self.add_bos_token:
-             bos_token_ids = [self.bos_token_id]
-         else:
-             bos_token_ids = []
-
-         output = bos_token_ids + token_ids_0
-
-         if token_ids_1 is not None:
-             output = output + token_ids_1
-
-         if self.add_eos_token:
-             output = output + [self.eos_token_id]
-
-         return output
-
-     def get_special_tokens_mask(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-     ) -> List[int]:
-         """
-         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-         special tokens using the tokenizer `prepare_for_model` method.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                 Whether or not the token list is already formatted with special tokens for the model.
-
-         Returns:
-             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-         """
-         if already_has_special_tokens:
-             return super().get_special_tokens_mask(
-                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-             )
-
-         if token_ids_1 is None:
-             return [1] + ([0] * len(token_ids_0)) + [1]
-         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
-
-     def create_token_type_ids_from_sequences(
-         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-     ) -> List[int]:
-         """
-         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does not make
-         use of token type ids, therefore a list of zeros is returned.
-
-         Args:
-             token_ids_0 (`List[int]`):
-                 List of IDs.
-             token_ids_1 (`List[int]`, *optional*):
-                 Optional second list of IDs for sequence pairs.
-
-         Returns:
-             `List[int]`: List of zeros.
-         """
-         eos = [self.eos_token_id]
-
-         if token_ids_1 is None:
-             return len(token_ids_0 + eos) * [0]
-         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
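For context on the special-token behaviour implemented by the deleted tokenizer, here is a minimal sketch. It assumes a local copy of the removed file is importable as `tokenization_internlm` and that a SentencePiece model exists at `./tokenizer.model`; neither is provided by this commit, and the path is only illustrative.

```python
# Illustrative sketch of InternLMTokenizer's default special-token handling
# (add_bos_token=True, add_eos_token=False), using the file as it existed
# before this commit. The vocab path below is an assumption.
from tokenization_internlm import InternLMTokenizer

tok = InternLMTokenizer(vocab_file="./tokenizer.model")

ids = tok.convert_tokens_to_ids(tok.tokenize("Hello world"))

# build_inputs_with_special_tokens prepends <s> only; no </s> by default.
with_special = tok.build_inputs_with_special_tokens(ids)
assert with_special == [tok.bos_token_id] + ids

# get_special_tokens_mask reserves slots for both BOS and EOS, regardless
# of add_eos_token, so the mask is [1, 0, ..., 0, 1].
print(tok.get_special_tokens_mask(ids))
```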