```python
def DummyTok(model_max_length=4):
    """Build a minimal byte-level Unigram tokenizer for quick tests."""
    import tempfile

    from tokenizers import Tokenizer, models
    from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

    # One vocab entry per byte value; Unigram expects (token, score) pairs,
    # so the byte value doubles as an arbitrary float score.
    vocab = [(chr(i), float(i)) for i in range(256)]
    tokenizer = Tokenizer(models.Unigram(vocab))

    # Round-trip through a temporary file so transformers can load the
    # serialized tokenizer as a fast tokenizer.
    with tempfile.NamedTemporaryFile() as f:
        tokenizer.save(f.name)
        real_tokenizer = PreTrainedTokenizerFast(
            tokenizer_file=f.name, model_max_length=model_max_length
        )

    # PreTrainedTokenizerFast has no .save(); persist the underlying
    # tokenizers.Tokenizer instead.
    real_tokenizer.backend_tokenizer.save("dummy_tokenizer.json")
    return real_tokenizer
```
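
A minimal usage sketch, assuming the helper is meant for quick round-trip tests; the input string and the prints are illustrative, and the truncation behavior is just the standard `transformers` handling of `model_max_length`:

```python
# Hypothetical usage of DummyTok: each character becomes one byte-level token.
tok = DummyTok(model_max_length=4)

# With truncation enabled, the encoding is capped at model_max_length ids,
# so a 6-character input should yield at most 4 token ids.
enc = tok("abcdef", truncation=True)
print(enc["input_ids"])
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```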