Bighost committed
Commit de0c5b7
1 Parent(s): d4d545e

Upload tokenizer

qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
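qwen.tiktoken is a tiktoken-style BPE vocabulary: each non-empty line holds one token as base64-encoded bytes followed by its integer rank, which is exactly what _load_tiktoken_bpe and save_vocabulary in tokenization_qwen.py below read and write. Since the file itself is too large to render, here is a minimal, hypothetical parsing sketch (the path and the peek_ranks helper are illustrative, not part of the upload):

import base64

# Illustrative sketch only: each line of qwen.tiktoken is
# "<base64-encoded token bytes> <integer rank>".
def peek_ranks(path="qwen.tiktoken", limit=3):
    with open(path, "rb") as f:
        for i, line in enumerate(f):
            if i >= limit:
                break
            token_b64, rank = line.split()
            print(base64.b64decode(token_b64), int(rank))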
 
special_tokens_map.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>"
+ }
tokenization_qwen.py ADDED
@@ -0,0 +1,273 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Tokenization classes for QWen."""
+
+ import base64
+ import logging
+ import os
+ import unicodedata
+ from typing import Collection, Dict, List, Set, Tuple, Union
+
+ import tiktoken
+ from transformers import PreTrainedTokenizer, AddedToken
+
+ logger = logging.getLogger(__name__)
+
+
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
+
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
+ ENDOFTEXT = "<|endoftext|>"
+ IMSTART = "<|im_start|>"
+ IMEND = "<|im_end|>"
+ # as the default behavior is changed to allow special tokens in
+ # regular texts, the surface forms of special tokens need to be
+ # as different as possible to minimize the impact
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
+ # changed to use actual index to avoid misconfiguration with vocabulary expansion
+ SPECIAL_START_ID = 151643
+ SPECIAL_TOKENS = tuple(
+     enumerate(
+         (
+             (
+                 ENDOFTEXT,
+                 IMSTART,
+                 IMEND,
+             )
+             + EXTRAS
+         ),
+         start=SPECIAL_START_ID,
+     )
+ )
+ SPECIAL_TOKENS_SET = set(t for i, t in SPECIAL_TOKENS)
+
+
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
+     with open(tiktoken_bpe_file, "rb") as f:
+         contents = f.read()
+     return {
+         base64.b64decode(token): int(rank)
+         for token, rank in (line.split() for line in contents.splitlines() if line)
+     }
+
+
+ class QWenTokenizer(PreTrainedTokenizer):
+     """QWen tokenizer."""
+
+     vocab_files_names = VOCAB_FILES_NAMES
+
+     def __init__(
+         self,
+         vocab_file,
+         errors="replace",
+         extra_vocab_file=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         # how to handle errors in decoding UTF-8 byte sequences
+         # use "ignore" if you are in streaming inference
+         self.errors = errors
+
+         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: Dict[bytes, int]
+         self.special_tokens = {
+             token: index
+             for index, token in SPECIAL_TOKENS
+         }
+
+         # try to load extra vocab from file
+         if extra_vocab_file is not None:
+             used_ids = set(self.mergeable_ranks.values()) | set(self.special_tokens.values())
+             extra_mergeable_ranks = _load_tiktoken_bpe(extra_vocab_file)
+             for token, index in extra_mergeable_ranks.items():
+                 if token in self.mergeable_ranks:
+                     logger.info(f"extra token {token} exists, skipping")
+                     continue
+                 if index in used_ids:
+                     logger.info(f"the index {index} for extra token {token} exists, skipping")
+                     continue
+                 self.mergeable_ranks[token] = index
+             # the index may be sparse after this, but tiktoken.Encoding will handle it
+
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+
+         self.decoder = {
+             v: k for k, v in self.mergeable_ranks.items()
+         }  # type: dict[int, bytes|str]
+         self.decoder.update({v: k for k, v in self.special_tokens.items()})
+
+         self.tokenizer = enc  # type: tiktoken.Encoding
+
+         self.eod_id = self.tokenizer.eot_token
+         self.im_start_id = self.special_tokens[IMSTART]
+         self.im_end_id = self.special_tokens[IMEND]
+
+     def __getstate__(self):
+         # for pickle lovers
+         state = self.__dict__.copy()
+         del state["tokenizer"]
+         return state
+
+     def __setstate__(self, state):
+         # the tokenizer is not a native Python object; don't pickle it, rebuild it
+         self.__dict__.update(state)
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         self.tokenizer = enc
+
+     def __len__(self) -> int:
+         return self.tokenizer.n_vocab
+
+     def get_vocab(self) -> Dict[bytes, int]:
+         return self.mergeable_ranks
+
+     def convert_tokens_to_ids(
+         self, tokens: Union[bytes, str, List[Union[bytes, str]]]
+     ) -> Union[int, List[int]]:
+         ids = []
+         if isinstance(tokens, (str, bytes)):
+             if tokens in self.special_tokens:
+                 return self.special_tokens[tokens]
+             else:
+                 return self.mergeable_ranks.get(tokens)
+         for token in tokens:
+             if token in self.special_tokens:
+                 ids.append(self.special_tokens[token])
+             else:
+                 ids.append(self.mergeable_ranks.get(token))
+         return ids
+
+     def _add_tokens(
+         self,
+         new_tokens: Union[List[str], List[AddedToken]],
+         special_tokens: bool = False,
+     ) -> int:
+         if not special_tokens and new_tokens:
+             raise ValueError("Adding regular tokens is not supported")
+         for token in new_tokens:
+             surface_form = token.content if isinstance(token, AddedToken) else token
+             if surface_form not in SPECIAL_TOKENS_SET:
+                 raise ValueError("Adding unknown special tokens is not supported")
+         return 0
+
+     def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
+         """
+         Save only the vocabulary (the BPE ranks) of the tokenizer.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         file_path = os.path.join(save_directory, "qwen.tiktoken")
+         with open(file_path, "w", encoding="utf8") as w:
+             for k, v in self.mergeable_ranks.items():
+                 line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
+                 w.write(line)
+         return (file_path,)
+
+     def tokenize(
+         self,
+         text: str,
+         allowed_special: Union[Set, str] = "all",
+         disallowed_special: Union[Collection, str] = (),
+         **kwargs,
+     ) -> List[Union[bytes, str]]:
+         """
+         Converts a string into a sequence of tokens.
+
+         Args:
+             text (`str`):
+                 The sequence to be encoded.
+             allowed_special (`Literal["all"]` or `set`):
+                 The surface forms of the tokens to be encoded as special tokens in regular texts.
+                 Defaults to "all".
+             disallowed_special (`Literal["all"]` or `Collection`):
+                 The surface forms of the tokens that should not appear in regular texts and that trigger errors.
+                 Defaults to an empty tuple.
+
+             kwargs (additional keyword arguments, *optional*):
+                 Will be passed to the underlying model-specific encode method.
+
+         Returns:
+             `List[bytes|str]`: The list of tokens.
+         """
+         tokens = []
+         text = unicodedata.normalize("NFC", text)
+
+         # this implementation takes a detour: text -> token id -> token surface forms
+         for t in self.tokenizer.encode(
+             text, allowed_special=allowed_special, disallowed_special=disallowed_special
+         ):
+             tokens.append(self.decoder[t])
+         return tokens
+
+     def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+         """
+         Converts a sequence of tokens into a single string.
+         """
+         text = ""
+         temp = b""
+         for t in tokens:
+             if isinstance(t, str):
+                 if temp:
+                     text += temp.decode("utf-8", errors=self.errors)
+                     temp = b""
+                 text += t
+             elif isinstance(t, bytes):
+                 temp += t
+             else:
+                 raise TypeError("token should only be of type bytes or str")
+         if temp:
+             text += temp.decode("utf-8", errors=self.errors)
+         return text
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_vocab
+
+     def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
+         """Converts an id to a token, special tokens included"""
+         if index in self.decoder:
+             return self.decoder[index]
+         raise ValueError("unknown ids")
+
+     def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
+         """Converts a token to an id using the vocab, special tokens included"""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         if token in self.mergeable_ranks:
+             return self.mergeable_ranks[token]
+         raise ValueError("unknown token")
+
+     def _tokenize(self, text: str, **kwargs):
+         """
+         Converts a string into a sequence of tokens (string), using the tokenizer. Splits into words for word-based
+         vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
+
+         Does NOT take care of added tokens.
+         """
+         raise NotImplementedError
+
+     def _decode(
+         self,
+         token_ids: Union[int, List[int]],
+         skip_special_tokens: bool = False,
+         errors: str = None,
+         **kwargs,
+     ) -> str:
+         if isinstance(token_ids, int):
+             token_ids = [token_ids]
+         if skip_special_tokens:
+             token_ids = [i for i in token_ids if i < self.eod_id]
+         return self.tokenizer.decode(token_ids, errors=errors or self.errors)
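Note that tokenize returns token surface forms (bytes for ordinary BPE pieces, str for special tokens) rather than ids, and convert_tokens_to_string reassembles them with the configured error handling. A minimal usage sketch, assuming qwen.tiktoken sits next to the script and a transformers version compatible with this custom class; the sample text is arbitrary:

# Minimal sketch; the file layout and sample text are assumptions.
from tokenization_qwen import QWenTokenizer

tok = QWenTokenizer("qwen.tiktoken")
surface = tok.tokenize("hello world<|endoftext|>")   # bytes pieces plus the special str token
ids = tok.convert_tokens_to_ids(surface)
print(ids[-1] == tok.eod_id)                         # True: <|endoftext|> maps to the eod id
print(tok.convert_tokens_to_string(surface))         # "hello world<|endoftext|>"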
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 8192,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "QWenTokenizer"
+ }
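The auto_map entry above is what lets AutoTokenizer resolve QWenTokenizer from tokenization_qwen.py when remote code is trusted. A hedged loading sketch; "<this-repo-id>" is a placeholder for this repository's id on the Hub:

from transformers import AutoTokenizer

# "<this-repo-id>" is a placeholder for the Hub repository containing these files.
tok = AutoTokenizer.from_pretrained("<this-repo-id>", trust_remote_code=True)
print(tok.eos_token, tok.pad_token)   # both "<|endoftext|>" per special_tokens_map.json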