```python
import tempfile

from tokenizers import Tokenizer, models, processors
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

# Dummy byte-level vocab: 256 (token, score) pairs for the Unigram model,
# so each character's id equals its code point.
vocab = [(chr(i), i) for i in range(256)]
tokenizer = Tokenizer(models.Unigram(vocab))

# Special tokens get the next free ids: <bos> -> 256, <eos> -> 257.
tokenizer.add_special_tokens(["<bos>", "<eos>"])
tokenizer.post_processor = processors.TemplateProcessing(
    single="<bos> $0 <eos>",
    special_tokens=[("<bos>", 256), ("<eos>", 257)],
)

# Round-trip through a file so transformers can load it as a fast tokenizer.
with tempfile.NamedTemporaryFile() as f:
    tokenizer.save(f.name)
    real_tokenizer = PreTrainedTokenizerFast(
        tokenizer_file=f.name, eos_token="<eos>", bos_token="<bos>"
    )

real_tokenizer._tokenizer.save("dummy.json")
```

Small change.
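As a quick sanity check (a minimal sketch, assuming the byte vocab above where a character's id is its code point), encoding a short string should show the template post-processor wrapping the sequence with the special ids:

```python
# With the dummy vocab above, 'a' -> 97 and 'b' -> 98; the post-processor
# prepends <bos> (256) and appends <eos> (257).
ids = real_tokenizer("ab")["input_ids"]
print(ids)  # expected under these assumptions: [256, 97, 98, 257]
```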