cognitivess committed
Commit 980a0c2 · Parent(s): 2fd97cf

Rename cognitivess_model/tokenization_Cognitivess_fast.py to cognitivess_model/tokenization_cognitivess_fast.py
cognitivess_model/tokenization_Cognitivess_fast.py
DELETED
@@ -1,310 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-from shutil import copyfile
-from typing import Optional, Tuple
-
-from tokenizers import processors
-
-from ...tokenization_utils_fast import PreTrainedTokenizerFast
-from ...utils import is_sentencepiece_available, logging
-from ...utils.versions import require_version
-
-
-require_version("tokenizers>=0.13.3")
-
-if is_sentencepiece_available():
-    from .tokenization_Cognitivess import CognitivessTokenizer
-else:
-    CognitivessTokenizer = None
-
-logger = logging.get_logger(__name__)
-VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
-
-B_INST, E_INST = "[INST]", "[/INST]"
-B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
-
-# fmt: off
-DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
-answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
-that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
-correct. If you don't know the answer to a question, please don't share false information."""
-# fmt: on
-
-
-class CognitivessTokenizerFast(PreTrainedTokenizerFast):
-    """
-    Construct a Cognitivess tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-    This uses notably ByteFallback and no normalization.
-
-    ```python
-    >>> from transformers import CognitivessTokenizerFast
-
-    >>> tokenizer = CognitivessTokenizerFast.from_pretrained("hf-internal-testing/Cognitivess-tokenizer")
-    >>> tokenizer.encode("Hello this is a test")
-    [1, 15043, 445, 338, 263, 1243]
-    ```
-
-    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
-    call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
-    values of the first token and final token of an encoded sequence will not be correct). For more details, check out
-    the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
-
-
-    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
-    refer to this superclass for more information regarding those methods.
-
-    Args:
-        vocab_file (`str`, *optional*):
-            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
-            contains the vocabulary necessary to instantiate a tokenizer.
-        tokenizer_file (`str`, *optional*):
-            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
-            contains everything needed to load the tokenizer.
-        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
-            Whether or not to clean up spaces after decoding; cleanup consists in removing potential artifacts like
-            extra spaces.
-        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
-            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
-            token instead.
-        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
-            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
-        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
-            The end of sequence token.
-        add_bos_token (`bool`, *optional*, defaults to `True`):
-            Whether or not to add a `bos_token` at the start of sequences.
-        add_eos_token (`bool`, *optional*, defaults to `False`):
-            Whether or not to add an `eos_token` at the end of sequences.
-        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
-            Whether or not the default system prompt for Cognitivess should be used.
-        legacy (`bool`, *optional*):
-            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
-            and #25224, which include fixes to properly handle tokens that appear after special tokens.
-            Make sure to also set `from_slow` to `True`.
-            A simple example:
-
-            - `legacy=True`:
-            ```python
-            >>> from transformers import CognitivessTokenizerFast
-
-            >>> tokenizer = CognitivessTokenizerFast.from_pretrained("CognitivessAI/cognitivess", legacy=True, from_slow=True)
-            >>> tokenizer.encode("Hello <s>.")  # 869 is '▁.'
-            [1, 15043, 29871, 1, 869]
-            ```
-            - `legacy=False`:
-            ```python
-            >>> from transformers import CognitivessTokenizerFast
-
-            >>> tokenizer = CognitivessTokenizerFast.from_pretrained("CognitivessAI/cognitivess", legacy=False, from_slow=True)
-            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
-            [1, 15043, 29871, 1, 29889]
-            ```
-            Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
-        add_prefix_space (`bool`, *optional*):
-            Whether or not the tokenizer should automatically add a prefix space.
-    """
-
-    vocab_files_names = VOCAB_FILES_NAMES
-    slow_tokenizer_class = CognitivessTokenizer
-    padding_side = "left"
-    model_input_names = ["input_ids", "attention_mask"]
-
-    def __init__(
-        self,
-        vocab_file=None,
-        tokenizer_file=None,
-        clean_up_tokenization_spaces=False,
-        unk_token="<unk>",
-        bos_token="<s>",
-        eos_token="</s>",
-        add_bos_token=True,
-        add_eos_token=False,
-        use_default_system_prompt=False,
-        legacy=None,
-        add_prefix_space=None,
-        **kwargs,
-    ):
-        if legacy is None:
-            logger.warning_once(
-                f"You are using the default legacy behaviour of the {self.__class__}. This is"
-                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
-                " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
-                " means, and thoroughly read the reason why this was added as explained in"
-                " https://github.com/huggingface/transformers/pull/24565 - if you loaded a Cognitivess tokenizer from a GGUF file"
-                " you can ignore this message."
-            )
-            legacy = True
-        self.legacy = legacy
-
-        if add_prefix_space is not None:
-            kwargs["from_slow"] = True
-
-        super().__init__(
-            vocab_file=vocab_file,
-            tokenizer_file=tokenizer_file,
-            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-            unk_token=unk_token,
-            bos_token=bos_token,
-            eos_token=eos_token,
-            add_bos_token=add_bos_token,
-            add_eos_token=add_eos_token,
-            use_default_system_prompt=use_default_system_prompt,
-            add_prefix_space=add_prefix_space,
-            legacy=legacy,
-            **kwargs,
-        )
-        self._add_bos_token = add_bos_token
-        self._add_eos_token = add_eos_token
-        self.update_post_processor()
-        self.use_default_system_prompt = use_default_system_prompt
-        self.vocab_file = vocab_file
-
-    @property
-    def can_save_slow_tokenizer(self) -> bool:
-        return os.path.isfile(self.vocab_file) if self.vocab_file else False
-
-    def update_post_processor(self):
-        """
-        Updates the underlying post processor with the current `bos_token` and `eos_token`.
-        """
-        bos = self.bos_token
-        bos_token_id = self.bos_token_id
-        if bos is None and self.add_bos_token:
-            raise ValueError("add_bos_token = True but bos_token = None")
-
-        eos = self.eos_token
-        eos_token_id = self.eos_token_id
-        if eos is None and self.add_eos_token:
-            raise ValueError("add_eos_token = True but eos_token = None")
-
-        single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
-        pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
-
-        special_tokens = []
-        if self.add_bos_token:
-            special_tokens.append((bos, bos_token_id))
-        if self.add_eos_token:
-            special_tokens.append((eos, eos_token_id))
-        self._tokenizer.post_processor = processors.TemplateProcessing(
-            single=single, pair=pair, special_tokens=special_tokens
-        )
-
-    @property
-    def add_eos_token(self):
-        return self._add_eos_token
-
-    @property
-    def add_bos_token(self):
-        return self._add_bos_token
-
-    @add_eos_token.setter
-    def add_eos_token(self, value):
-        self._add_eos_token = value
-        self.update_post_processor()
-
-    @add_bos_token.setter
-    def add_bos_token(self, value):
-        self._add_bos_token = value
-        self.update_post_processor()
-
-    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
-        if not self.can_save_slow_tokenizer:
-            raise ValueError(
-                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
-                "tokenizer."
-            )
-
-        if not os.path.isdir(save_directory):
-            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-            return
-        out_vocab_file = os.path.join(
-            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
-        )
-
-        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
-            copyfile(self.vocab_file, out_vocab_file)
-
-        return (out_vocab_file,)
-
-    @property
-    # Copied from transformers.models.Cognitivess.tokenization_Cognitivess.CognitivessTokenizer.default_chat_template
-    def default_chat_template(self):
-        """
-        Cognitivess uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
-        Assistant messages do not have special tokens, because Cognitivess chat models are generally trained with strict
-        user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
-        rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
-        results in an unusual token ordering when it is present. This template should definitely be changed if you wish
-        to fine-tune a model with more flexible role ordering!
-
-        The output should look something like:
-
-        <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
-        <bos>[INST] Prompt [/INST]
-
-        The reference for this chat template is [this code
-        snippet](https://github.com/facebookresearch/Cognitivess/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/Cognitivess/generation.py#L320-L362)
-        in the original repository.
-        """
-        template = (
-            "{% if messages[0]['role'] == 'system' %}"
-            "{% set loop_messages = messages[1:] %}"  # Extract system message if it's present
-            "{% set system_message = messages[0]['content'] %}"
-            "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
-            "{% set loop_messages = messages %}"  # Or use the default system message if the flag is set
-            "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
-            "{% else %}"
-            "{% set loop_messages = messages %}"
-            "{% set system_message = false %}"
-            "{% endif %}"
-            "{% for message in loop_messages %}"  # Loop over all non-system messages
-            "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
-            "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
-            "{% endif %}"
-            "{% if loop.index0 == 0 and system_message != false %}"  # Embed system message in first message
-            "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
-            "{% else %}"
-            "{% set content = message['content'] %}"
-            "{% endif %}"
-            "{% if message['role'] == 'user' %}"  # After all of that, handle messages/roles in a fairly normal way
-            "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
-            "{% elif message['role'] == 'system' %}"
-            "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
-            "{% elif message['role'] == 'assistant' %}"
-            "{{ ' ' + content.strip() + ' ' + eos_token }}"
-            "{% endif %}"
-            "{% endfor %}"
-        )
-        template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
-        default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
-        template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
-
-        return template
-
-    # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
-    # Copied from transformers.models.Cognitivess.tokenization_Cognitivess.CognitivessTokenizer.build_inputs_with_special_tokens
-    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-        output = bos_token_id + token_ids_0 + eos_token_id
-
-        if token_ids_1 is not None:
-            output = output + bos_token_id + token_ids_1 + eos_token_id
-
-        return output
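The `default_chat_template` above is what `apply_chat_template` renders when no explicit template is set. A sketch of the expected output shape (the repo id is taken from the docstrings above and assumed, not verified, to host a chat-capable tokenizer; any Llama-2-style template renders the same shape):

```python
from transformers import AutoTokenizer

messages = [
    {"role": "system", "content": "You are a pirate."},
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Ahoy!"},
    {"role": "user", "content": "How are you?"},
]

# Repo id assumed from the docstrings above.
tok = AutoTokenizer.from_pretrained("CognitivessAI/cognitivess")
print(tok.apply_chat_template(messages, tokenize=False))
# <s>[INST] <<SYS>>\nYou are a pirate.\n<</SYS>>\n\nHello! [/INST] Ahoy! </s><s>[INST] How are you? [/INST]
```

Note how the system message is folded into the first user turn, exactly as the docstring describes.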
cognitivess_model/tokenization_cognitivess_fast.py
ADDED
@@ -0,0 +1,5 @@
+from transformers import PreTrainedTokenizerFast
+
+class CognitivessTokenizerFast(PreTrainedTokenizerFast):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
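The replacement class overrides nothing, so loading it reduces to plain `PreTrainedTokenizerFast` semantics: vocabulary, special tokens, and post-processing must all come from a serialized `tokenizer.json`. A minimal sketch, assuming the package is importable and such a file exists (the path is hypothetical):

```python
from cognitivess_model.tokenization_cognitivess_fast import CognitivessTokenizerFast

# Hypothetical path: everything the stub knows comes from this file.
tokenizer = CognitivessTokenizerFast(tokenizer_file="tokenizer.json")
print(tokenizer.encode("Hello this is a test"))
```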