pszemraj committed on
Commit
6a8b823
1 Parent(s): c891eaf

Upload tokenization_phi3_small.py

Browse files
Files changed (1) hide show
  1. tokenization_phi3_small.py +315 -0
tokenization_phi3_small.py ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/tokenization_qwen.py
2
+ import os
3
+ from typing import Collection, List, Optional, Dict, Set, Tuple, Union
4
+
5
+ from functools import cached_property
6
+
7
+ import base64
8
+
9
+ from transformers import PreTrainedTokenizer, AddedToken, AutoConfig
10
+ from transformers.models.auto.tokenization_auto import get_tokenizer_config
11
+ import tiktoken
12
+
13
+
14
+ """
15
+ This tokenizer is almost identical to tiktoken.get_encoding("cl100k_base")
16
+ with a few additional special tokens to support the ChatML format.
17
+
18
+ TODO(bapatra): Right now, I do not save the special tokens to the vocab file.
19
+ Maybe in the future, that would be useful? Can add that support later.
20
+
21
+ """
22
+
23
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
24
+ with open(tiktoken_bpe_file, "rb") as f:
25
+ contents = f.read()
26
+ return {
27
+ base64.b64decode(token): int(rank)
28
+ for token, rank in (line.split() for line in contents.splitlines() if line)
29
+ }
30
+
31
# On the megatron codebase, we pad vocabularies to ensure matrix multiplication is fast.
# this in turn causes some indices to be empty. We account for these empty indices by adding
# dummy tokens to the tokenizer.

# Padded vocab size used by the model's embedding matrix.
EFFECTIVE_PADDED_VOCAB_SIZE = 100352
# Number of ids that actually carry meaning (cl100k_base + the specials below).
ACTUAL_VOCAB_SIZE = 100276


# Filler tokens covering ids 100277..100351 (offsets 1..75), named
# <|dummy_id_12|> .. <|dummy_id_86|> — the naming continues after the
# explicit <|dummy_id_0|>..<|dummy_id_11|> entries in SPECIAL_TOKENS below.
DUMMY_TOKENS = {
    f"<|dummy_id_{11 + offset}|>": 100276 + offset
    for offset in range(1, EFFECTIVE_PADDED_VOCAB_SIZE - ACTUAL_VOCAB_SIZE)
}

# Mapping of every special-token surface form to its id. Ids 100257-100260
# mirror tiktoken.get_encoding("cl100k_base")._special_tokens exactly.
SPECIAL_TOKENS = {
    # tiktoken.get_encoding("cl100k_base")._special_tokens
    '<|endoftext|>': 100257,
    '<|fim_prefix|>': 100258,
    '<|fim_middle|>': 100259,
    '<|fim_suffix|>': 100260,
    # Special tokens for post-training
    "<|system|>": 100261,
    "<|user|>": 100262,
    "<|assistant|>": 100263,
    # Dummy unused tokens
    "<|dummy_id_0|>": 100264,
    "<|dummy_id_1|>": 100265,
    # Special tokens for post-training continued
    "<|end|>": 100266,
    # Some dummy tokens, so that tokenization is contiguous and does not cause issues
    # Note that the 100256th token of tiktoken.get_encoding("cl100k_base") does not
    # actually map to anything. So we use a dummy token here.
    "<|dummy_id_2|>": 100256,
    # Likewise, tokens from 100267 to 100275 are also unused
    "<|dummy_id_3|>": 100267,
    "<|dummy_id_4|>": 100268,
    "<|dummy_id_5|>": 100269,
    "<|dummy_id_6|>": 100270,
    "<|dummy_id_7|>": 100271,
    "<|dummy_id_8|>": 100272,
    "<|dummy_id_9|>": 100273,
    "<|dummy_id_10|>": 100274,
    "<|dummy_id_11|>": 100275,
    # The final end of prompt token
    # (unused, but present as a part of tiktoken.get_encoding("cl100k_base")._special_tokens)
    '<|endofprompt|>': 100276,
    # Dummy tokens to account for padding of the tokenizer
    # We pad to ensure tensor cores are used for vocab multiplication
    **DUMMY_TOKENS
}
80
+
81
class Phi3SmallTokenizer(PreTrainedTokenizer):
    """Tokenizer for Phi-3-small built on tiktoken's ``cl100k_base`` encoding.

    The vocabulary is cl100k_base plus ChatML-style special tokens
    (``<|system|>``, ``<|user|>``, ``<|assistant|>``, ``<|end|>``) and dummy
    tokens that pad the vocabulary to a matmul-friendly size
    (``EFFECTIVE_PADDED_VOCAB_SIZE``).
    """

    vocab_files_names = {
        "vocab_file": "cl100k_base.tiktoken"
    }

    model_input_names: List[str] = ["input_ids", "attention_mask"]
    padding_side = "left"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        errors: str = "replace",
        **kwargs
    ) -> None:
        """
        Args:
            vocab_file: Optional path to a tiktoken-format BPE file. When
                omitted, the ranks bundled with ``cl100k_base`` are used.
            errors: Error policy forwarded to ``bytes.decode`` when turning
                token bytes back into text.
        """
        # PreTrainedTokenizer's init calls _add_tokens, which in turn checks
        # if the token is present in `self.special_tokens`. Hence it must be
        # set *before* super().__init__ runs. The way Qwen gets around this is
        # by checking against SPECIAL_TOKENS, but checking against the
        # object's own `special_tokens` leaves room for per-instance tokens.
        self.special_tokens = SPECIAL_TOKENS

        super().__init__(**kwargs)
        self.errors = errors

        base = tiktoken.get_encoding("cl100k_base")
        if vocab_file is None:
            self.mergeable_ranks: Dict[bytes, int] = base._mergeable_ranks
        else:
            self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)

        self.pat_str = base._pat_str

        enc = tiktoken.Encoding(
            name="phi3small",
            pat_str=self.pat_str,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        self.tokenizer = enc

        # id -> token table: bytes for ordinary BPE pieces, str for specials.
        self.decoder: Dict[int, bytes] = {
            v: k for k, v in self.mergeable_ranks.items()
        }
        self.decoder.update({v: k for k, v in self.special_tokens.items()})

        self.eod_id = self.tokenizer.eot_token
        self._eos_token = self._convert_id_to_token(self.eod_id)

        # Setting the bos_token to be the same as the eos_token.
        # Note that this is **not** the correct thing to do, and is done
        # just so that some of the downstream libraries do not break.
        self._bos_token = self._eos_token

        # Convenience ids for the ChatML role markers.
        self.system_id = self.special_tokens["<|system|>"]
        self.user_id = self.special_tokens["<|user|>"]
        self.assistant_id = self.special_tokens["<|assistant|>"]
        self.end_id = self.special_tokens["<|end|>"]

    @cached_property
    def dummy_token_indices(self) -> List[int]:
        """Return the sorted ids of every token the model treats as unused."""
        # There are some additional special tokens in the cl100k_base tokenizer
        # that we do not use. Hence, we also consider them to be dummy tokens.
        additional_tokens = [
            "<|fim_prefix|>",
            "<|fim_middle|>",
            "<|fim_suffix|>",
            "<|endofprompt|>"
        ]
        dummy_token_indices = [index for token, index in self.special_tokens.items() if "dummy_id" in token]
        dummy_token_indices.extend([self.special_tokens[token] for token in additional_tokens])
        return sorted(dummy_token_indices)

    def __getstate__(self):
        # tiktoken.Encoding objects are not picklable: drop the encoder here
        # and rebuild it in __setstate__ from pat_str / ranks / specials.
        state = self.__dict__.copy()
        del state["tokenizer"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        # Rebuild the Encoding dropped by __getstate__. Use the same name as
        # __init__ ("phi3small") so a pickle round-trip yields an
        # indistinguishable object (previously this said "cl100k_im").
        enc = tiktoken.Encoding(
            name="phi3small",
            pat_str=self.pat_str,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        self.tokenizer = enc

    def __len__(self):
        # Padded size (EFFECTIVE_PADDED_VOCAB_SIZE), not the meaningful size.
        return self.tokenizer.n_vocab

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        *init_inputs,
        **kwargs,
    ):
        """Instantiate the tokenizer, pulling ``model_max_length`` (and, when
        present, ``chat_template``) from the repo's tokenizer config, or
        falling back to the model config's ``max_position_embeddings``."""
        cls_kwargs = kwargs
        # First try to load from the tokenization config if it exists
        tokenization_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
        if tokenization_config:
            cls_kwargs.update(
                dict(
                    model_max_length=tokenization_config["model_max_length"],
                    chat_template=tokenization_config.get("chat_template", None)
                )
            )
        else:
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True)
            cls_kwargs["model_max_length"] = config.max_position_embeddings
        return cls(**cls_kwargs)

    def get_vocab(self) -> Dict[Union[str, bytes], int]:
        """Return the full token -> id mapping (BPE ranks plus specials)."""
        return {**self.mergeable_ranks, **self.special_tokens}

    def convert_tokens_to_ids(
        self,
        tokens: Union[bytes, str, List[Union[bytes, str]]]
    ) -> Union[int, List[int]]:
        """Convert a token (or list of tokens) to id(s).

        A single unknown token yields ``None`` (via ``dict.get``) rather than
        raising; lists are converted element-wise.
        """
        if isinstance(tokens, (str, bytes)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            return self.mergeable_ranks.get(tokens)
        # (Removed a dead duplicate `ids = []` that preceded this annotation.)
        ids: List[int] = []
        for token in tokens:
            ids.append(self.convert_tokens_to_ids(token))
        return ids

    def _add_tokens(
        self,
        new_tokens: Union[List[str], List[AddedToken]],
        special_tokens: bool = False,
    ) -> int:
        """Validate added tokens; only tokens already in ``special_tokens``
        are accepted, and nothing is actually added (returns 0)."""
        if not special_tokens and new_tokens:
            raise ValueError("Only special tokens can be added to this tokenizer")
        for token in new_tokens:
            surface_form = token.content if isinstance(token, AddedToken) else token
            if surface_form not in self.special_tokens:
                raise ValueError(
                    "For now, we do not support unknown special tokens\n"
                    "In the future, if there is a need for this, we can add special tokens to the tokenizer\n"
                    "starting from rank 100261 - 100263 and then 100266 - 100275.\n"
                    "And finally, we can re-construct the enc object back\n"
                )
        return 0

    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
        """Write the BPE ranks to ``cl100k_base.tiktoken`` in *save_directory*.

        Special tokens are not saved (see the module TODO). NOTE(review): a
        ``filename_prefix`` passed by ``save_pretrained`` is swallowed by
        ``**kwargs`` so the file name always matches ``vocab_files_names``.
        """
        file_path = os.path.join(save_directory, "cl100k_base.tiktoken")
        with open(file_path, "w") as f:
            for token, rank in self.mergeable_ranks.items():
                line = base64.b64encode(token).decode("utf-8") + " " + str(rank) + "\n"
                f.write(line)
        return (file_path,)

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        **kwargs
    ) -> List[Union[bytes, str]]:
        """Encode *text* with tiktoken and map each id back to its surface
        token: ``str`` for special tokens, ``bytes`` for BPE pieces."""
        tokens: List[Union[bytes, str]] = []
        for token_id in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.decoder[token_id])
        return tokens

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens in a single string.

        Consecutive ``bytes`` tokens are buffered and decoded together so that
        multi-byte UTF-8 sequences split across tokens still decode correctly.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors=self.errors)
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                # Fixed message: previously read "of type types or str".
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors=self.errors)
        return text

    @property
    def vocab_size(self):
        # Padded vocab size, matching __len__.
        return self.tokenizer.n_vocab

    @property
    def eos_token_id(self) -> int:
        return self.eod_id

    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
        """Converts an id to a token, special tokens included"""
        if index in self.decoder:
            return self.decoder[index]
        raise ValueError("unknown ids")

    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
        """Converts a token to an id using the vocab, special tokens included"""
        if token in self.special_tokens:
            return self.special_tokens[token]
        if token in self.mergeable_ranks:
            return self.mergeable_ranks[token]
        raise ValueError("unknown token")

    def _tokenize(self, text: str, **kwargs):
        """
        Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
        vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
        Do NOT take care of added tokens.

        Unsupported here: use ``tokenize`` instead, which goes through tiktoken.
        """
        raise NotImplementedError

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Decode id(s) to text.

        When ``skip_special_tokens`` is True, ids >= ``eod_id`` (100257) are
        dropped — note this still lets ``<|dummy_id_2|>`` (100256) through.
        """
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            token_ids = [i for i in token_ids if i < self.eod_id]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)
314
+
315
+