Ozan Oktay committed on
Commit fb5ad5d
1 parent: ae820b0

Delete tokenization_bert.py

Files changed (1):
tokenization_bert.py (+0, -554)
tokenization_bert.py DELETED
@@ -1,554 +0,0 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Bert."""


import collections
import os
import unicodedata
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt",
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt",
        "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
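
For illustration (an annotation, not part of the deleted file): `load_vocab` maps each line of a one-token-per-line vocab file to its line index. Assuming a hypothetical three-line vocab.txt containing `[PAD]`, `[UNK]`, and `hello`:

    vocab = load_vocab("vocab.txt")  # hypothetical file, contents as above
    # -> OrderedDict([("[PAD]", 0), ("[UNK]", 1), ("hello", 2)])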


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens
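
For illustration (not part of the deleted file), the helper simply strips and splits on runs of whitespace:

    whitespace_tokenize("  hello   world ")  # -> ["hello", "world"]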


class BertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a BERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):

                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
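
For illustration (not part of the deleted file), `_tokenize` is a two-stage pipeline: basic tokenization splits on whitespace and punctuation, then WordPiece splits each resulting word. With a hypothetical vocab containing `un`, `##aff`, and `##able`:

    tokenizer = BertTokenizer("vocab.txt")  # hypothetical vocab file
    tokenizer._tokenize("Unaffable!")
    # basic: ["unaffable", "!"] -> WordPiece: ["un", "##aff", "##able", "!"]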

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
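
For illustration, the `" ##"` replacement re-attaches each WordPiece continuation piece to the piece before it:

    tokenizer.convert_tokens_to_string(["un", "##aff", "##able", "!"])  # -> "unaffable !"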

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
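
For illustration, assuming a tokenizer whose `cls_token_id` is 101 and `sep_token_id` is 102 (BERT's usual values, assumed here):

    tokenizer.build_inputs_with_special_tokens([7, 8], [9])  # -> [101, 7, 8, 102, 9, 102]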

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
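
For illustration, the mask flags the positions where `build_inputs_with_special_tokens` would place `[CLS]` and `[SEP]`:

    tokenizer.get_special_tokens_mask([7, 8], [9])  # -> [1, 0, 0, 1, 0, 1]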

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
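
For illustration, segment A (with its `[CLS]` and `[SEP]`) gets type id 0 and segment B (with its trailing `[SEP]`) gets type id 1:

    tokenizer.create_token_type_ids_from_sequences([7, 8], [9])  # -> [0, 0, 0, 0, 1, 1]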

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
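
A round-trip sketch (not part of the deleted file): because tokens are written one per line in ascending id order, reloading the saved file should reproduce the mapping:

    path, = tokenizer.save_vocabulary("./out")  # hypothetical directory -> "./out/vocab.txt"
    assert load_vocab(path) == tokenizer.vocab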


class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents

    def tokenize(self, text, never_split=None):
        """
        Basic tokenization of a piece of text. Splits on "white spaces" only; for sub-word tokenization, see
        WordpieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
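
For illustration with the defaults (lowercasing, accent stripping, punctuation splitting; not part of the deleted file):

    basic = BasicTokenizer()
    basic.tokenize("Héllo, WORLD!")  # -> ["hello", ",", "world", "!"]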

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False
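
For illustration, U+4E2D ("中") lies in the base 0x4E00-0x9FFF block, so it is padded with spaces and later split into its own token:

    BasicTokenizer()._tokenize_chinese_chars("a中b")  # -> "a 中 b"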

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
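
A self-contained sketch of the greedy longest-match-first loop with a toy vocabulary (not part of the deleted file):

    wp = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2}, unk_token="[UNK]")
    wp.tokenize("unaffable")  # -> ["un", "##aff", "##able"]
    wp.tokenize("xyz")        # no piece matches -> ["[UNK]"]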