from vocab.glm_chinese import tokenizer

# Decode a single token id back to its surface string.
print(tokenizer.decode([20]))

# Dump the full token-to-id mapping of the GLM Chinese tokenizer.
vocab = tokenizer.get_vocab()
print(vocab)