```python
def DummyTok(model_max_length=4):
    """Build a minimal single-character tokenizer for testing purposes."""
    import tempfile

    from tokenizers import Tokenizer, models
    from transformers.tokenization_utils_fast import PreTrainedTokenizerFast

    # One single-character token per byte value. Unigram expects (token, score)
    # pairs with float scores; the actual score values are irrelevant for a
    # dummy tokenizer, so the index is simply cast to float.
    vocab = [(chr(i), float(i)) for i in range(256)]
    tokenizer = Tokenizer(models.Unigram(vocab))

    # Serialize the backend tokenizer to a temporary file and wrap it in the
    # transformers fast-tokenizer interface.
    with tempfile.NamedTemporaryFile() as f:
        tokenizer.save(f.name)
        real_tokenizer = PreTrainedTokenizerFast(
            tokenizer_file=f.name, model_max_length=model_max_length
        )

    # PreTrainedTokenizerFast has no `save` method; persist the underlying
    # tokenizers.Tokenizer JSON instead.
    real_tokenizer.backend_tokenizer.save("dummy_tokenizer.json")
    return real_tokenizer
```
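A quick usage sketch (assuming `tokenizers` and `transformers` are installed; the input string is just an example): the dummy tokenizer splits text into single-character tokens, and passing `truncation=True` caps the output at `model_max_length`.

```python
# Minimal sketch: one token per character, truncated to model_max_length (4 here).
tok = DummyTok(model_max_length=4)
enc = tok("hello", truncation=True)
# Ids follow vocab order, so each id should match the character code,
# e.g. 'h' -> 104. Expect four ids after truncation.
print(enc["input_ids"])
```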