"""
最简单的tokenizer
"""




import json
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("20B_tokenizer_chinese.v2.json")
print("vocab_size with added_tokens:", tokenizer.get_vocab_size(with_added_tokens=True))
print("vocab_size without added_tokens:", tokenizer.get_vocab_size(with_added_tokens=False))

# vocab = tokenizer.get_vocab()


def test_token():
    """
    TODO: 特殊符号编码有问题 〚 <
    :return:
    """
    # text = " \t\n中国解决方法黑白侗鸩玥,。!?;〚 "
    text = "中国解决方法黑白侗鸩,。!?;一个人去哪里疗疗 一 个刹车卉〚卟<‛⦆"
    # text = open("../../data_sample/EBKE20150806001_epub_30198917_30198917.txt", "r", encoding="utf-8").readline()
    encoding = tokenizer.encode(text)
    decoding = tokenizer.decode(encoding.ids)
    print(decoding)
    for word in text:
        encoding = tokenizer.encode(word)
        for token_id in encoding.ids:
            decode_str = tokenizer.decode([token_id])  # unsupported characters all decode to the replacement character �, i.e. "\ufffd"
            token = tokenizer.id_to_token(token_id)
            print(word, token_id, decode_str, json.dumps(decode_str), token, json.dumps(token))

def test_encode():
    text = "中国解决方法黑白侗鸩,。!?;一个人去哪里疗疗<|endoftext|>一 个刹车卉〚卟<‛⦆"
    encoding = tokenizer.encode(text)
    print(tokenizer.decode(encoding.ids))
    for token_id in encoding.ids:
        decode_str = tokenizer.decode([token_id])  # unsupported characters all decode to the replacement character �, i.e. "\ufffd"
        token = tokenizer.id_to_token(token_id)
        print(token_id, decode_str, json.dumps(decode_str), token, json.dumps(token))

def test_decode():
    # Decoding an empty id list should simply return an empty string.
    encoding = []
    decode_str = tokenizer.decode(encoding)
    print(decode_str)
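
# Illustrative sketch: a quick round-trip check that flags characters which do not
# survive encode -> decode. It reuses the `tokenizer` loaded above; the function name
# and the sample text are assumptions for demonstration, not part of the test suite.
def test_roundtrip():
    text = "中国〚卟<‛⦆"
    for ch in text:
        ids = tokenizer.encode(ch).ids
        restored = tokenizer.decode(ids)
        print(ch, ids, restored, "OK" if restored == ch else "MISMATCH")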

# test_token()
test_encode()
# test_decode()