# tokenizer-vocab / filter_zh.py
# Filters a tokenizer vocabulary (dumped from various tokenizers) down to
# the tokens that contain Chinese (Han) characters.
import re
import sys
import json
from collections import OrderedDict

if len(sys.argv) <= 1:
    sys.stderr.write("Expected a vocabulary file name\n")
    sys.exit(-1)
vocab_json = sys.argv[1]
# The stdlib `re` module does not support Unicode script properties such as
# r'\p{Han}' (that needs the third-party `regex` package), so the Han ranges
# are spelled out: CJK Unified Ideographs (U+4E00-U+9FFF), Extension A
# (U+3400-U+4DBF), Extension B (U+20000-U+2A6DF), Extensions C-F
# (U+2A700-U+2EBEF), Extensions G-H (U+30000-U+323AF), the compatibility
# ideographs that are in fact unified (U+FA0E etc.), and the ideographic
# marks U+3006/U+3007. The optional second class matches a trailing
# variation selector (U+FE00-U+FE0F, U+E0100-U+E01EF).
#pattern = re.compile(r'\p{Han}')
pattern = re.compile(r'[\u4e00-\u9fff\u3400-\u4dbf\U00020000-\U0002a6df\U0002a700-\U0002ebef\U00030000-\U000323af\ufa0e\ufa0f\ufa11\ufa13\ufa14\ufa1f\ufa21\ufa23\ufa24\ufa27\ufa28\ufa29\u3006\u3007][\ufe00-\ufe0f\U000e0100-\U000e01ef]?')
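# A minimal sketch of the same filter using the `regex` package (assumed to
# be installed; it is not a dependency of this script), which understands
# script properties directly:
#
#   import regex
#   han = regex.compile(r'\p{Han}')
#   han.search(token)  # truthy when `token` contains a Han character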

vocab_filtered = OrderedDict()
with open(vocab_json, "r", encoding="utf8") as f:
    vocab = json.load(f)
    # enumerate() yields a positional index: for a list-shaped vocabulary it
    # equals the token id, and for a dict-shaped vocab.json (token -> id) it
    # matches the stored id only when entries are saved in id order.
    for code, token in enumerate(vocab):
        #if len(token) > 1 and pattern.search(token):
        if pattern.search(token):
            vocab_filtered[token] = code

# Write the filtered vocabulary next to the input, inserting "_f" before the
# extension (e.g. vocab.json -> vocab_f.json); rindex() assumes the input
# file name has an extension.
suffix_idx = vocab_json.rindex('.')
vocab_f_json = f"{vocab_json[:suffix_idx]}_f{vocab_json[suffix_idx:]}"
with open(vocab_f_json, "w", encoding="utf8") as f:
    json.dump(vocab_filtered, f, indent=4, ensure_ascii=False)
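
# Usage sketch (the file name is illustrative, not part of the repository):
#   $ python filter_zh.py vocab.json
# writes vocab_f.json mapping each Han-containing token to its index in the
# input vocabulary, e.g. {"中": 123, ...}.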