Leyo committed on
Commit 9d8f3cc
1 Parent(s): 374a419

add processing/tokenization siglip

Files changed (2)
  1. processing_siglip.py +143 -0
  2. tokenization_siglip.py +389 -0
processing_siglip.py ADDED
@@ -0,0 +1,143 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for SigLIP.
+"""
+
+from typing import List, Optional, Union
+
+from transformers.feature_extraction_utils import BatchFeature
+from transformers.image_utils import ImageInput
+from transformers.processing_utils import ProcessorMixin
+from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from transformers.utils import TensorType
+
+
+class SiglipProcessor(ProcessorMixin):
+    r"""
+    Constructs a Siglip processor which wraps a Siglip image processor and a Siglip tokenizer into a single processor.
+
+    [`SiglipProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`SiglipTokenizer`]. See the
+    [`~SiglipProcessor.__call__`] and [`~SiglipProcessor.decode`] for more information.
+
+    Args:
+        image_processor ([`SiglipImageProcessor`]):
+            The image processor is a required input.
+        tokenizer ([`SiglipTokenizer`]):
+            The tokenizer is a required input.
+    """
+
+    attributes = ["image_processor", "tokenizer"]
+    image_processor_class = "SiglipImageProcessor"
+    tokenizer_class = "SiglipTokenizer"
+
+    def __init__(self, image_processor, tokenizer):
+        super().__init__(image_processor, tokenizer)
+
+    def __call__(
+        self,
+        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+        images: ImageInput = None,
+        padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Union[bool, str, TruncationStrategy] = None,
+        max_length: int = None,
+        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+    ) -> BatchFeature:
+        """
+        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+        and `kwargs` arguments to SiglipTokenizer's [`~SiglipTokenizer.__call__`] if `text` is not `None` to encode
+        the text. To prepare the image(s), this method forwards the `images` argument to
+        SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+        of the above two methods for more information.
+
+        Args:
+            text (`str`, `List[str]`, `List[List[str]]`):
+                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
+                number of channels, H and W are image height and width.
+            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+                Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                index) among:
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                  acceptable input length for the model if that argument is not provided.
+                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+                  lengths).
+            max_length (`int`, *optional*):
+                Maximum length of the returned list and optionally padding length (see above).
+            truncation (`bool`, *optional*):
+                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+            return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors of a particular framework. Acceptable values are:
+
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return NumPy `np.ndarray` objects.
+                - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+        Returns:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+              `None`).
+            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+        """
+
+        if text is None and images is None:
+            raise ValueError("You have to specify either text or images. Both cannot be none.")
+
+        if text is not None:
+            encoding = self.tokenizer(
+                text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+            )
+
+        if images is not None:
+            image_features = self.image_processor(images, return_tensors=return_tensors)
+
+        if text is not None and images is not None:
+            encoding["pixel_values"] = image_features.pixel_values
+            return encoding
+        elif text is not None:
+            return encoding
+        else:
+            return BatchFeature(data=dict(**image_features), tensor_type=return_tensors)
+
+    def decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
+        the docstring of this method for more information.
+        """
+        return self.tokenizer.decode(*args, **kwargs)
+
+    def batch_decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to SiglipTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+        refer to the docstring of this method for more information.
+        """
+        return self.tokenizer.batch_decode(*args, **kwargs)
+
+    @property
+    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Siglip, T5->Siglip
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
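For context, here is a brief usage sketch of the processor added above. It is not part of the commit: it assumes a transformers release that ships SiglipImageProcessor, that processing_siglip.py and the tokenization_siglip.py added in this same commit are importable locally, and that the google/siglip-base-patch16-224 checkpoint referenced in tokenization_siglip.py hosts an image-processor config and the SentencePiece vocabulary.

# Illustrative only -- not part of this commit. Names of checkpoints and modules
# are assumptions, see the note above.
import requests
from PIL import Image
from transformers import SiglipImageProcessor

from processing_siglip import SiglipProcessor
from tokenization_siglip import SiglipTokenizer

image_processor = SiglipImageProcessor.from_pretrained("google/siglip-base-patch16-224")
tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224")
processor = SiglipProcessor(image_processor=image_processor, tokenizer=tokenizer)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text-only calls return the tokenizer output, image-only calls return pixel_values,
# and passing both merges them into a single BatchFeature.
inputs = processor(
    text=["a photo of 2 cats", "a photo of a dog"],
    images=image,
    padding="max_length",  # the tokenizer defaults to model_max_length=64
    return_tensors="pt",
)
print(inputs.keys())  # e.g. input_ids, pixel_values (plus attention_mask depending on settings)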
tokenization_siglip.py ADDED
@@ -0,0 +1,389 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for SigLIP model."""
+
+import os
+import re
+import string
+import warnings
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.convert_slow_tokenizer import import_protobuf
+from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.tokenization_utils_base import AddedToken
+
+
+if TYPE_CHECKING:
+    from transformers.tokenization_utils_base import TextInput
+from transformers.utils import logging, requires_backends
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+    "vocab_file": {
+        "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/spiece.model",
+    }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+    "google/siglip-base-patch16-224": 256,
+}
+
+SPIECE_UNDERLINE = "▁"
+
+
+class SiglipTokenizer(PreTrainedTokenizer):
+    """
+    Construct a Siglip tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+    this superclass for more information regarding those methods.
+
+    Args:
+        vocab_file (`str`):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+            contains the vocabulary necessary to instantiate a tokenizer.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        pad_token (`str`, *optional*, defaults to `"</s>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        additional_special_tokens (`List[str]`, *optional*):
+            Additional special tokens used by the tokenizer.
+        sp_model_kwargs (`dict`, *optional*):
+            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+            to set:
+
+            - `enable_sampling`: Enable subword regularization.
+            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+              - `nbest_size = {0,1}`: No sampling is performed.
+              - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+                using forward-filtering-and-backward-sampling algorithm.
+
+            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+              BPE-dropout.
+        model_max_length (`int`, *optional*, defaults to 64):
+            The maximum length (in number of tokens) for model inputs.
+        do_lower_case (`bool`, *optional*, defaults to `True`):
+            Whether or not to lowercase the input when tokenizing.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        eos_token="</s>",
+        unk_token="<unk>",
+        pad_token="</s>",
+        additional_special_tokens=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        model_max_length=64,
+        do_lower_case=True,
+        **kwargs,
+    ) -> None:
+        requires_backends(self, "protobuf")
+
+        pad_token = (
+            AddedToken(pad_token, rstrip=True, lstrip=True, normalized=False, special=True)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        unk_token = (
+            AddedToken(unk_token, rstrip=True, lstrip=True, normalized=False, special=True)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        eos_token = (
+            AddedToken(eos_token, rstrip=True, lstrip=True, normalized=False, special=True)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+        self.do_lower_case = do_lower_case
+        self.vocab_file = vocab_file
+
+        self.sp_model = self.get_spm_processor()
+        self.vocab_file = vocab_file
+
+        super().__init__(
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            additional_special_tokens=additional_special_tokens,
+            sp_model_kwargs=self.sp_model_kwargs,
+            model_max_length=model_max_length,
+            do_lower_case=do_lower_case,
+            **kwargs,
+        )
+
+    def get_spm_processor(self):
+        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        with open(self.vocab_file, "rb") as f:
+            sp_model = f.read()
+            model_pb2 = import_protobuf()
+            model = model_pb2.ModelProto.FromString(sp_model)
+            normalizer_spec = model_pb2.NormalizerSpec()
+            normalizer_spec.add_dummy_prefix = False
+            model.normalizer_spec.MergeFrom(normalizer_spec)
+            sp_model = model.SerializeToString()
+            tokenizer.LoadFromSerializedProto(sp_model)
+        return tokenizer
+
+    @property
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.vocab_size
+    def vocab_size(self):
+        return self.sp_model.get_piece_size()
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_vocab
+    def get_vocab(self):
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_special_tokens_mask
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        # normal case: some special tokens
+        if token_ids_1 is None:
+            return ([0] * len(token_ids_0)) + [1]
+        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._add_eos_if_not_present
+    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
+        """Do not add eos again if user already added it."""
+        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
+            warnings.warn(
+                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
+                " eos tokens being added."
+            )
+            return token_ids
+        else:
+            return token_ids + [self.eos_token_id]
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.create_token_type_ids_from_sequences
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
+        use of token type ids, therefore a list of zeros is returned.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of zeros.
+        """
+        eos = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return len(token_ids_0 + eos) * [0]
+        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.build_inputs_with_special_tokens
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+        adding special tokens. A sequence has the following format:
+
+        - single sequence: `X </s>`
+        - pair of sequences: `A </s> B </s>`
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs to which the special tokens will be added.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+        """
+        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
+        if token_ids_1 is None:
+            return token_ids_0
+        else:
+            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
+            return token_ids_0 + token_ids_1
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__getstate__
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__setstate__
+    def __setstate__(self, d):
+        self.__dict__ = d
+
+        # for backward compatibility
+        if not hasattr(self, "sp_model_kwargs"):
+            self.sp_model_kwargs = {}
+
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    def remove_punctuation(self, text: str) -> str:
+        return text.translate(str.maketrans("", "", string.punctuation))
+
+    # source: https://github.com/google-research/big_vision/blob/3b8e5ab6ad4f96e32b32826f9e1b8fd277914f9c/big_vision/evaluators/proj/image_text/prompt_engineering.py#L94
+    def canonicalize_text(self, text, *, keep_punctuation_exact_string=None):
+        """Returns canonicalized `text` (punctuation removed).
+
+        Args:
+            text (`str`):
+                String to be canonicalized.
+            keep_punctuation_exact_string (`str`, *optional*):
+                If provided, then this exact string is kept. For example providing '{}' will keep any occurrences of '{}'
+                (but will still remove '{' and '}' that appear separately).
+        """
+        if keep_punctuation_exact_string:
+            text = keep_punctuation_exact_string.join(
+                self.remove_punctuation(part) for part in text.split(keep_punctuation_exact_string)
+            )
+        else:
+            text = self.remove_punctuation(text)
+        text = re.sub(r"\s+", " ", text)
+        text = text.strip()
+
+        return text
+
+    def tokenize(self, text: "TextInput", add_special_tokens=False, **kwargs) -> List[str]:
+        """
+        Converts a string to a list of tokens.
+        """
+        tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " "), **kwargs)
+
+        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
+            tokens = tokens[1:]
+        return tokens
+
+    @property
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.unk_token_length
+    def unk_token_length(self):
+        return len(self.sp_model.encode(str(self.unk_token)))
+
+    def _tokenize(self, text, **kwargs):
+        """
+        Returns a tokenized string.
+
+        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
+        SPIECE_UNDERLINE.
+
+        For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`.
+
+        Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
+        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
+        """
+        text = self.canonicalize_text(text, keep_punctuation_exact_string=None)
+        tokens = self.sp_model.encode(text, out_type=str)
+
+        # 1. Encode string + prefix ex: "<unk> Hey"
+        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
+        # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
+        return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._convert_token_to_id
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) in an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._convert_id_to_token
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) in a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.convert_tokens_to_string
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) in a single string."""
+        current_sub_tokens = []
+        # since we manually add the prefix space, we have to remove it
+        tokens[0] = tokens[0].lstrip(SPIECE_UNDERLINE)
+        out_string = ""
+        prev_is_special = False
+        for token in tokens:
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string.strip()
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.save_vocabulary
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
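A minimal sketch (again, not part of the commit) of how the canonicalization and the `<unk>`-prefix trick documented in `_tokenize` above behave in practice. It assumes a local `spiece.model` downloaded from the google/siglip-base-patch16-224 checkpoint listed in PRETRAINED_VOCAB_FILES_MAP, and that the sentencepiece and protobuf backends are installed.

# Illustrative only -- ./spiece.model is an assumed local copy of the
# SentencePiece model referenced in PRETRAINED_VOCAB_FILES_MAP above.
from tokenization_siglip import SiglipTokenizer

tokenizer = SiglipTokenizer(vocab_file="./spiece.model")

# canonicalize_text strips punctuation and collapses whitespace before encoding.
print(tokenizer.canonicalize_text("A photo, of 2   cats!"))  # -> "A photo of 2 cats"

# tokenize() prepends SPIECE_UNDERLINE, and _tokenize() encodes "<unk>" + text,
# then drops the first unk_token_length pieces so no dummy prefix leaks in.
tokens = tokenizer.tokenize("a photo of 2 cats")
ids = tokenizer.convert_tokens_to_ids(tokens)

# build_inputs_with_special_tokens appends </s> (which also serves as the pad token).
print(tokenizer.build_inputs_with_special_tokens(ids)[-1] == tokenizer.eos_token_id)  # True

# Round-tripping through convert_tokens_to_string removes the manually added prefix space.
print(tokenizer.convert_tokens_to_string(tokens))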