|
|
|
|
|
|
|
|
|
""" |
|
## TODO: |
|
- http get方式获取参数, |
|
- 自启动 |
|
- iter_vocab 的 warmup |
|
- add_special_token 开关 |
|
- theme 开关 light/dark |
|
- token_id/tokens/bytes 开关 |
|
- 通过 javascript 添加 hover_text |
|
- |
|
|
|
|
|
|
|
plots |
|
|
|
table |
|
|
|
## related demo |
|
- [text-processing.com tokenize demo](http://text-processing.com/demo/tokenize/)
|
- [gpt-tokenizer](https://gpt-tokenizer.dev/) |
|
- [llama-tokenizer-js](https://belladoreai.github.io/llama-tokenizer-js/example-demo/build/) |
|
- [the-tokenizer-playground (Xenova)](https://huggingface.co/spaces/Xenova/the-tokenizer-playground)
|
|
|
## 可视化 |
|
|
|
[ The, 2, QUICK, Brown, Foxes, jumped, over, the, lazy, dog's, bone ] |
|
""" |
|
|
|
|
|
import gradio as gr |
|
|
|
from vocab import all_tokenizers |
|
from util import * |
|
|
|
# Default contents of the input textbox on page load: an English sentence
# plus a Chinese headline, so both scripts are tokenized immediately.
example_text = """Replace this text in the input field to see how tokenization works
华为智能音箱发布:华为Sound X"""
|
|
|
|
|
# Preset demo inputs; each entry is [input_text, tokenizer_1, tokenizer_2]
# and feeds the input textbox and both tokenizer dropdowns at once.
examples = [
    ["标点测试:,。!?;", "baichuan_7b", "llama"],
    ["符号测试:🦙❤❥웃유♋☮✊☏☢☚✔☑♚▢♪✈✞÷↑↓▤▥⊙■□▣▽¿─│♥❣▬▫☿Ⓐ ✋✉☣☤", "baichuan_7b", "llama"],
    ["中文简体:宽带,繁体:樂來", "baichuan_7b", "llama"],
    ["数字测试:(10086 + 98) = 100184", "baichuan_7b", "llama"],
]


def example_fn(example_idx):
    """Return the preset example selected in the examples dropdown.

    Args:
        example_idx: Zero-based position emitted by the dropdown (it is
            configured with ``type="index"``).

    Returns:
        The ``[input_text, tokenizer_1, tokenizer_2]`` triple at that index.
    """
    selected = examples[example_idx]
    return selected
|
|
|
|
|
|
|
# Top-level UI definition: one shared input box feeding two tokenizers shown
# side by side, each with a stats row, a highlighted-token view, and a token
# table.  All interactivity is wired via .change() callbacks at the bottom.
with gr.Blocks(css="style.css") as demo:
    gr.HTML("""<h1 align="center">Tokenizer Arena ⚔️</h1>""")

    # --- Input section: example picker + free-form text box ---
    with gr.Row():
        gr.Markdown("## Input Text")
        # NOTE(review): only 3 labels are listed but `examples` holds 4
        # entries, so the last preset is unreachable from the UI — confirm
        # whether a "Example4" label is missing.
        dropdown_examples = gr.Dropdown(
            ["Example1", "Example2", "Example3"],
            value="Examples",  # placeholder text; deliberately not a valid choice
            type="index",      # the callback receives the selected index, not the label
            show_label=False,
            container=False,
            scale=0,
            elem_classes="example-style"
        )

    user_input = gr.Textbox(
        value=example_text,
        label="Input Text",
        lines=5,
        show_label=False,
    )

    gr.Markdown("## Tokenization")

    # --- Tokenizer pickers and per-tokenizer statistics ---
    with gr.Row():
        # Left column: tokenizer 1.
        with gr.Column(scale=6):
            with gr.Group():
                tokenizer_type_1 = gr.Dropdown(
                    all_tokenizers,
                    value="llama",
                    label="Tokenizer 1",
                )
            with gr.Group():
                # Bare string below is a no-op leftover from an earlier
                # HTML-based stats layout; kept as-is (it has no effect).
                """
                <div class="stat"><div class="stat-value">69</div><div class="stat-label">Characters</div></div>
                """
                with gr.Row():
                    # Stats fields are populated by basic_count /
                    # get_overlap_token_size callbacks below.
                    stats_vocab_size_1 = gr.TextArea(
                        label="VocabSize",
                        lines=1,
                        elem_classes="statistics"
                    )
                    stats_zh_token_size_1 = gr.TextArea(
                        label="ZH char/word",
                        lines=1,
                        elem_classes="statistics"
                    )
                    stats_overlap_token_size_1 = gr.TextArea(
                        label="Overlap Tokens",
                        lines=1,
                        elem_classes="statistics"
                    )

        # Decorative "VS" graphic separating the two tokenizer columns.
        gr.Image("images/VS.svg", scale=1, show_label=False, show_download_button=False, container=False)

        # Right column: tokenizer 2 (mirrors the left column).
        with gr.Column(scale=6):
            with gr.Group():
                tokenizer_type_2 = gr.Dropdown(
                    all_tokenizers,
                    value="baichuan_7b",
                    label="Tokenizer 2",
                )
            with gr.Group():
                with gr.Row():
                    stats_vocab_size_2 = gr.TextArea(
                        label="VocabSize",
                        lines=1,
                        elem_classes="statistics"
                    )
                    stats_zh_token_size_2 = gr.TextArea(
                        label="ZH char/word",
                        lines=1,
                        elem_classes="statistics"
                    )
                    stats_overlap_token_size_2 = gr.TextArea(
                        label="Overlap Tokens",
                        lines=1,
                        elem_classes="statistics"
                    )

    # --- Highlighted token views, one per tokenizer ---
    # NOTE(review): `gr.Highlightedtext` looks like the deprecated casing of
    # `gr.HighlightedText` — confirm against the pinned gradio version.
    with gr.Row():
        with gr.Column():
            output_text_1 = gr.Highlightedtext(
                label="Tokens 1",
                show_legend=True,
                elem_classes="space-show"
            )
        with gr.Column():
            output_text_2 = gr.Highlightedtext(
                label="Tokens 2",
                show_legend=True,
                elem_classes="space-show"
            )

    # --- Token detail tables ---
    # NOTE(review): the middle column is labeled "Byte" for table 1 but
    # "Token" for table 2 — confirm which is intended.
    with gr.Row():
        output_table_1 = gr.Dataframe(
            headers=["TokenID", "Byte", "Text"],
            datatype=["str", "str", "str"],
        )
        output_table_2 = gr.Dataframe(
            headers=["TokenID", "Token", "Text"],
            datatype=["str", "str", "str"],
        )

    # --- Event wiring (handlers come from `util` via the star import) ---
    # Changing tokenizer 1 re-tokenizes the input and refreshes its stats;
    # overlap stats depend on both tokenizers, so both fields are updated.
    tokenizer_type_1.change(tokenize, [user_input, tokenizer_type_1],
                            [output_text_1, output_table_1])
    tokenizer_type_1.change(basic_count, [tokenizer_type_1], [stats_vocab_size_1, stats_zh_token_size_1])
    tokenizer_type_1.change(get_overlap_token_size, [tokenizer_type_1, tokenizer_type_2],
                            [stats_overlap_token_size_1, stats_overlap_token_size_2])

    # Editing the text re-tokenizes with BOTH tokenizers in one call.
    user_input.change(tokenize_pair,
                      [user_input, tokenizer_type_1, tokenizer_type_2],
                      [output_text_1, output_table_1, output_text_2, output_table_2])

    # Changing tokenizer 2 mirrors the tokenizer-1 wiring above.
    tokenizer_type_2.change(tokenize, [user_input, tokenizer_type_2],
                            [output_text_2, output_table_2])
    tokenizer_type_2.change(basic_count, [tokenizer_type_2], [stats_vocab_size_2, stats_zh_token_size_2])
    tokenizer_type_2.change(get_overlap_token_size, [tokenizer_type_1, tokenizer_type_2],
                            [stats_overlap_token_size_1, stats_overlap_token_size_2])

    # Selecting a preset fills the textbox and both tokenizer dropdowns.
    dropdown_examples.change(
        example_fn,
        dropdown_examples,
        [user_input, tokenizer_type_1, tokenizer_type_2]
    )
|
|
|
|
|
|
|
|
|
|
|
# Launch with a bounded request queue (max 20 pending) so slow tokenizations
# don't reject or block concurrent visitors.
if __name__ == "__main__":
    demo.queue(max_size=20).launch()
|
|
|
|