# tokenizer-vocab / extract.py
# Author: chuckchen
# Commit 7ab99c9: Add vocabulary dumped from various tokenizers together with processing scripts
#!/usr/bin/env python3
import sys
from transformers import AutoTokenizer
import json
# Which pretrained tokenizer to export; the first CLI argument overrides the default.
model_name = "nghuyong/ernie-3.0-base-zh"
if len(sys.argv) > 1:
    model_name = sys.argv[1]

# Load the tokenizer and pull its full token -> id mapping.
vocab = AutoTokenizer.from_pretrained(model_name).get_vocab()

# Sort entries by token id so the dumped JSON reads in vocabulary order.
ordered_entries = sorted(vocab.items(), key=lambda kv: kv[1])

# Output file is named after the model's repo basename, e.g. "ernie-3.0-base-zh".
out_path = f"./encoder_vocab_{model_name.split('/')[-1]}.json"
with open(out_path, "w", encoding="utf8") as fp:
    json.dump(dict(ordered_entries), fp, indent=4, ensure_ascii=False)