KaleiNeely committed
Commit: 977a8a4
Parent(s): 4219d3d
Update tokenization_rwkv5.py

tokenization_rwkv5.py  (+11 -242)  CHANGED
@@ -15,238 +15,8 @@
 """Tokenization classes for RWKV5."""
 
 import os
-from typing import TYPE_CHECKING, List, Optional, Tuple
 import re
-
-from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
-from transformers.utils import logging
-
-
-if TYPE_CHECKING:
-    pass
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {
-    "vocab_file": "vocab.txt",
-}
-PRETRAINED_VOCAB_FILES_MAP = {
-    "vocab_file": {
-        "ArthurZ/rwkv-5-utf": "https://huggingface.co/ArthurZ/rwkv-5-utf/blob/main/vocab.txt",
-    },
-}
-
-
-
-def whitespace_tokenize(text):
-    """Runs basic whitespace cleaning and splitting on a piece of text.
-    The separators are kept
-    """
-    text = text.strip()
-    if not text:
-        return []
-    tokens = re.split(b"(?= )", text)
-    return tokens
-
-
-class WordpieceTokenizer(object):
-    """Runs WordPiece tokenization."""
-
-    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
-        self.vocab = vocab
-        self.unk_token = unk_token
-        self.max_input_chars_per_word = max_input_chars_per_word
-
-    def tokenize(self, text):
-        """
-        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
-        tokenization using the given vocabulary.
-
-        For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
-
-        Args:
-            text: A single token or whitespace separated tokens. This should have
-                already been passed through *BasicTokenizer*.
-
-        Returns:
-            A list of wordpiece tokens.
-        """
-
-        output_tokens = []
-        for token in whitespace_tokenize(text):
-            chars = list(token)
-            if len(chars) > self.max_input_chars_per_word:
-                output_tokens.append(self.unk_token)
-                continue
-
-            is_bad = False
-            start = 0
-            sub_tokens = []
-            while start < len(chars):
-                end = len(chars)
-                cur_substr = None
-                while start < end:
-                    substr = bytes(chars[start:end])
-                    if substr in self.vocab:
-                        cur_substr = substr
-                        break
-                    end -= 1
-                if cur_substr is None:
-                    is_bad = True
-                    break
-                sub_tokens.append(cur_substr.decode())
-                start = end
-
-            if is_bad:
-                output_tokens.append(self.unk_token)
-            else:
-                output_tokens.extend(sub_tokens)
-        return output_tokens
-
-
-class Rwkv5Tokenizer(PreTrainedTokenizer):
-    vocab_files_names = VOCAB_FILES_NAMES
-    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-    max_model_input_sizes = {"ArthurZ/rwkv-5-utf": 2048}
-
-    model_input_names = ["input_ids", "attention_mask"]
-
-    def __init__(self, vocab_file, bos_token="<s>", eos_token="<s>", unk_token="<s>", pad_token="<s>",**kwargs):
-        if not os.path.isfile(vocab_file):
-            raise ValueError(
-                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
-                " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
-            )
-
-        with open(vocab_file, "r") as reader:
-            tokens = reader.readlines()
-        vocab = {}
-        for index, token in enumerate(tokens):
-            token = eval(token.rstrip("\n"))
-            vocab[token] = index
-
-        self.add_bos_token = True
-        self.encoder = vocab
-        self.decoder = {v: k for k, v in vocab.items()}
-        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=str(unk_token))
-        self._added_tokens_decoder = {0: AddedToken(str(bos_token))}
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, **kwargs)
-
-    @property
-    def vocab_size(self):
-        return len(self.encoder)
-
-    def get_vocab(self):
-        vocab = {str(self.convert_ids_to_tokens(i)): i for i in range(self.vocab_size)}
-        vocab.update(self.added_tokens_encoder)
-        return vocab
-
-    def _tokenize(self, text, split_special_tokens=False):
-        return self.wordpiece_tokenizer.tokenize(text.encode("utf-8"))
-
-    def _convert_token_to_id(self, token):
-        """Converts a token (byte) to an id using the vocab."""
-        if not isinstance(token, bytes):
-            token = token.encode("utf-8", errors="replace")
-        return self.encoder.get(token, self.unk_token_id)
-
-    def _convert_id_to_token(self, index):
-        """Converts an index (integer) in a token (byte) using the vocab."""
-        token = self.decoder.get(index, self.unk_token)
-        if isinstance(token, (bytes)):
-            token = token.decode("utf-8", errors="replace")
-        return token
-
-    def convert_tokens_to_string(self, tokens):
-        """Converts a sequence of tokens (bytes) in a single string. Additional tokens are encoded to bytes"""
-        out_string = b"".join([k.encode(errors="replace") if isinstance(k, str) else k for k in tokens]).decode(
-            "utf-8"
-        )
-        return out_string
-
-    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
-        index = 0
-        if os.path.isdir(save_directory):
-            vocab_file = os.path.join(
-                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-            )
-        else:
-            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
-        with open(vocab_file, "w") as writer:
-            for token, token_index in sorted(self.encoder.items(), key=lambda kv: kv[1]):
-                if index != token_index:
-                    logger.warning(
-                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
-                        " Please check that the vocabulary is not corrupted!"
-                    )
-                    index = token_index
-                writer.write(str(token) + "\n")
-                index += 1
-        return (vocab_file,)
-
-    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-        if self.add_bos_token:
-            bos_token_ids = [self.bos_token_id]
-        else:
-            bos_token_ids = []
-
-        output = bos_token_ids + token_ids_0
-
-        if token_ids_1 is None:
-            return output
-
-        return output + bos_token_ids + token_ids_1
-
-    def get_special_tokens_mask(
-        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
-    ) -> List[int]:
-        """
-        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
-        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
-
-        Args:
-            token_ids_0 (`List[int]`):
-                List of IDs.
-            token_ids_1 (`List[int]`, *optional*):
-                Optional second list of IDs for sequence pairs.
-            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                Whether or not the token list is already formatted with special tokens for the model.
-
-        Returns:
-            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-        """
-        if already_has_special_tokens:
-            return super().get_special_tokens_mask(
-                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
-            )
-
-        if not self.add_bos_token:
-            return super().get_special_tokens_mask(
-                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
-            )
-
-        if token_ids_1 is None:
-            return [1] + ([0] * len(token_ids_0))
-        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
-# coding=utf-8
-# Copyright 2024 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tokenization classes for RWKV5."""
-
-import os
 from typing import TYPE_CHECKING, List, Optional, Tuple
-import re
 
 from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
 from transformers.utils import logging
@@ -267,7 +37,6 @@ PRETRAINED_VOCAB_FILES_MAP = {
 }
 
 
-
 def whitespace_tokenize(text):
     """Runs basic whitespace cleaning and splitting on a piece of text.
     The separators are kept
@@ -282,10 +51,9 @@ def whitespace_tokenize(text):
 class WordpieceTokenizer(object):
     """Runs WordPiece tokenization."""
 
-    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+    def __init__(self, vocab, unk_token):
         self.vocab = vocab
         self.unk_token = unk_token
-        self.max_input_chars_per_word = max_input_chars_per_word
 
     def tokenize(self, text):
         """
@@ -305,10 +73,6 @@ class WordpieceTokenizer(object):
         output_tokens = []
         for token in whitespace_tokenize(text):
            chars = list(token)
-            if len(chars) > self.max_input_chars_per_word:
-                output_tokens.append(self.unk_token)
-                continue
-
            is_bad = False
            start = 0
            sub_tokens = []
@@ -324,9 +88,12 @@ class WordpieceTokenizer(object):
                 if cur_substr is None:
                     is_bad = True
                     break
-                sub_tokens.append(cur_substr.decode())
+                try:
+                    cur_substr = cur_substr.decode()
+                except UnicodeDecodeError:
+                    cur_substr = str(cur_substr)
+                sub_tokens.append(cur_substr)
                 start = end
-
             if is_bad:
                 output_tokens.append(self.unk_token)
             else:
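Note on the hunk above: `cur_substr` is a byte string taken from the RWKV5 vocabulary, and not every byte sequence is valid UTF-8, so an unconditional `decode()` can raise `UnicodeDecodeError`. A minimal, standalone sketch of the fallback behaviour the new `try`/`except` relies on (illustrative only, not taken from the file):

# Illustrative only: how the decode() fallback behaves for a non-UTF-8 byte token.
token = b"\xff"  # a single byte that is not valid UTF-8 on its own
try:
    piece = token.decode()
except UnicodeDecodeError:
    piece = str(token)  # fall back to the textual repr: "b'\\xff'"
print(piece)  # b'\xff'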
@@ -341,7 +108,7 @@ class Rwkv5Tokenizer(PreTrainedTokenizer):
 
     model_input_names = ["input_ids", "attention_mask"]
 
-    def __init__(self, vocab_file, bos_token="<s>", eos_token="<s>", unk_token="<s>", pad_token="<s>",**kwargs):
+    def __init__(self, vocab_file, bos_token="<s>", eos_token="<s>", unk_token="<s>", **kwargs):
         if not os.path.isfile(vocab_file):
             raise ValueError(
                 f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
@@ -360,7 +127,7 @@ class Rwkv5Tokenizer(PreTrainedTokenizer):
         self.decoder = {v: k for k, v in vocab.items()}
         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=str(unk_token))
         self._added_tokens_decoder = {0: AddedToken(str(bos_token))}
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, **kwargs)
+        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
 
     @property
     def vocab_size(self):
@@ -376,7 +143,9 @@ class Rwkv5Tokenizer(PreTrainedTokenizer):
 
     def _convert_token_to_id(self, token):
         """Converts a token (byte) to an id using the vocab."""
-        if not isinstance(token, bytes):
+        if token.startswith("b'\\"):
+            token = eval(token)
+        elif not isinstance(token, bytes):
             token = token.encode("utf-8", errors="replace")
         return self.encoder.get(token, self.unk_token_id)
 
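The `_convert_token_to_id` change above is the counterpart of the `decode()` fallback in `tokenize()`: a piece that fell back to `str(bytes)` is turned back into the original bytes before the vocabulary lookup. A small round-trip sketch with a toy vocabulary (illustrative only; `encoder` and `convert_token_to_id` here are standalone stand-ins, not the real class):

# Illustrative round trip with a toy byte-level vocab (not the real RWKV5 vocabulary).
encoder = {b"\xff": 7, b"hello": 42}

def convert_token_to_id(token, unk_token_id=0):
    if token.startswith("b'\\"):      # piece produced by the str(bytes) fallback
        token = eval(token)           # recover the original bytes, e.g. b"\xff"
    elif not isinstance(token, bytes):
        token = token.encode("utf-8", errors="replace")
    return encoder.get(token, unk_token_id)

print(convert_token_to_id(str(b"\xff")))  # 7: "b'\\xff'" is turned back into b"\xff"
print(convert_token_to_id("hello"))       # 42: ordinary str pieces are re-encoded to bytes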