"""
1. jd_vocab_tokens的中文:
2. 中文标点
3. 全中文(单字) unicode
4. 全中文()
中文汉字数:54230, 中文标点数: 549
"""
from collections import Counter
from transformers import AutoTokenizer
from data_sample.oov_base import jd_vocab_tokens
from utils.text_util import is_zh_char, has_zh
from zhon.hanzi import punctuation as zh_punc

# Assumption: this script sits next to the bert-base-chinese vocab, so that
# tokenizer is loaded here (the original used `tokenizer` without defining it).
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")

# Strip trailing newlines so single-character tokens really have length 1.
vocab = [line.rstrip("\n") for line in open("vocab.txt", "r", encoding="utf-8")]
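
# Illustrative behavior (assumes the bert-base-chinese tokenizer loaded above):
# tokenizer.encode("中", add_special_tokens=False) returns a single id, since
# common CJK characters are individual entries in this vocab.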

def zh_iterator():
    # CJK Unified Ideographs U+4E00..U+9FA5, inclusive.
    for idx in range(ord(u'\u4e00'), ord(u'\u9fa5') + 1):
        yield chr(idx)
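
# Sanity check (illustrative; assumes the inclusive range above): the basic
# CJK Unified Ideographs block U+4E00..U+9FA5 contains 20902 characters, so
#   sum(1 for _ in zh_iterator()) == 20902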

def test_coding_length(words, filter_fn=None):
    """Print token-id length statistics for single-character inputs."""
    all_length = []
    for word in words:
        if len(word) > 1:
            continue
        if filter_fn is not None and filter_fn(word):
            continue
        # AutoTokenizer.encode returns a list of ids; drop [CLS]/[SEP] so the
        # length reflects only the character itself.
        token_ids = tokenizer.encode(word, add_special_tokens=False)
        all_length.append(len(token_ids))
        # if len(token_ids) > 1:
        if len(token_ids) == 1:
            print(word, token_ids)
    print("encoding length distribution:", Counter(all_length))
    print("average encoding length:", sum(all_length) / len(all_length))

def has_zh_char(text):
    # True if any character is Chinese punctuation (zhon.hanzi.punctuation).
    return any(ch in zh_punc for ch in text)
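
# Illustrative example: zhon.hanzi.punctuation holds fullwidth marks such as
# "，", "。" and "《", so has_zh_char("hello，") is True while
# has_zh_char("hello,") is False (the ASCII comma is not Chinese punctuation).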

def iter_vocab():
    zh_token_count = 0
    zh_symbol_count = 0
    with open("vocab.zh.txt", "w", encoding="utf-8") as f_out:
        for idx, word in enumerate(vocab):
            # The original referenced an undefined `decode_str`; for a plain
            # vocab file the token text is just the (stripped) line.
            decode_str = word
            if has_zh(decode_str):
                zh_token_count += 1
                f_out.write("%d\t%s\tChinese character\n" % (idx, decode_str))
            elif has_zh_char(decode_str):
                zh_symbol_count += 1
                f_out.write("%d\t%s\tChinese punctuation\n" % (idx, decode_str))
    print("Chinese characters: %d, Chinese punctuation marks: %d" % (zh_token_count, zh_symbol_count))

if __name__ == "__main__":
    # test_coding_length(jd_vocab_tokens, filter_fn=lambda k: not is_zh_char(k))
    # test_coding_length(zh_punc)
    # test_coding_length(zh_iterator())
    iter_vocab()