import json
import os

from transformers import AutoTokenizer, BloomTokenizerFast


# Load from the local "tokenizer/" directory; AutoTokenizer would also work,
# but the BLOOM fast tokenizer is used explicitly here.
# tokenizer = AutoTokenizer.from_pretrained("tokenizer")
tokenizer = BloomTokenizerFast.from_pretrained("tokenizer")

print("vocab size:", tokenizer.vocab_size)

# tokens = tokenizer.encode("中国\nabcde")
tokens = tokenizer.encode("中")  # a single CJK character -> one or more token ids
decode_line = tokenizer.decode(tokens)  # decoding the full id list round-trips the text

# save_vocabulary(save_directory, filename_prefix) writes the vocab files
# under "tmp/" with filename prefix "ddd"; the directory must exist first.
os.makedirs("tmp", exist_ok=True)
tokenizer.save_vocabulary("tmp", "ddd")


def id2token(ids):
    # Map token ids to their raw vocabulary tokens (byte-level BPE strings,
    # not decoded text).
    return tokenizer.convert_ids_to_tokens(ids)

def test_token():
    # For each character, show the ids it encodes to, what each id decodes to
    # on its own, and the raw vocabulary token behind that id.
    for word in "中国解决方法黑白侗,。!?;":
        encoding = tokenizer.encode(word)
        for token_id in encoding:
            # A character split across multiple byte-level tokens decodes,
            # id by id, to the replacement character � ("\ufffd").
            decode_str = tokenizer.decode([token_id])
            token = id2token([token_id])
            print(word, token_id, decode_str, json.dumps(decode_str), token, json.dumps(token))


test_token()
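
# A minimal round-trip check (assumes the same local "tokenizer" directory):
# decoding the complete id list at once reassembles the underlying UTF-8
# bytes, so a character like 侗, split across two byte-level tokens, still
# survives even though each id alone decodes to �.
ids_dong = tokenizer.encode("侗")
print(ids_dong, "->", tokenizer.decode(ids_dong))  # per the output below: [1082, 235] -> 侗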

"""
中 773 中 "\u4e2d" ['ä¸Ń'] ["\u00e4\u00b8\u0143"]
国 844 国 "\u56fd" ['åĽ½'] ["\u00e5\u013d\u00bd"]
解 3053 解 "\u89e3" ['解'] ["\u00e8\u00a7\u00a3"]
决 3051 决 "\u51b3" ['åĨ³'] ["\u00e5\u0128\u00b3"]
方 1223 方 "\u65b9" ['æĸ¹'] ["\u00e6\u0138\u00b9"]
法 1283 法 "\u6cd5" ['æ³ķ'] ["\u00e6\u00b3\u0137"]
黑 7046 黑 "\u9ed1" ['é»ij'] ["\u00e9\u00bb\u0133"]
白 4162 白 "\u767d" ['çĻ½'] ["\u00e7\u013b\u00bd"]
侗 1082 � "\ufffd" ['ä¾'] ["\u00e4\u00be"]
侗 235 � "\ufffd" ['Ĺ'] ["\u0139"]
"""