import os

from tokenizers import Tokenizer


# Resolve the tokenizer JSON file relative to this script so loading works
# regardless of the current working directory. The constant names a file,
# not a directory, so it is called TOKENIZER_PATH.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
TOKENIZER_PATH = os.path.join(CURRENT_DIR, "20B_tokenizer_chinese.json")

# Load the tokenizer from its serialized JSON definition.
tokenizer = Tokenizer.from_file(TOKENIZER_PATH)

# Monkey-patch a `vocab_size` attribute onto the instance (counting added
# special tokens) so callers can read `tokenizer.vocab_size` directly.
tokenizer.vocab_size = tokenizer.get_vocab_size(with_added_tokens=True)

# Equivalent ways to obtain the vocabulary size:
# vocab_size = len(tokenizer.get_vocab())
# vocab_size = tokenizer.vocab_size
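

# A minimal usage sketch (an addition, not part of the original file):
# encode and decode a short mixed Chinese/English string to sanity-check the
# loaded tokenizer. `Tokenizer.encode` returns an `Encoding` whose `.ids`
# and `.tokens` expose the token ids and their surface forms; the sample
# string is arbitrary.
if __name__ == "__main__":
    sample = "你好，world!"
    encoding = tokenizer.encode(sample)
    print("tokens:", encoding.tokens)
    print("ids:", encoding.ids)
    # Round-trip back to text; skip_special_tokens drops any added specials.
    print("decoded:", tokenizer.decode(encoding.ids, skip_special_tokens=True))
    print("vocab size (with added tokens):", tokenizer.vocab_size)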