import gradio as gr
from transformers import AutoTokenizer
from huggingface_hub import repo_exists

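# Look up the tokenizer for the given repo and return (decoded_piece, token_id) pairs
# in the list-of-tuples format that gr.HighlightedText accepts as a value.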
def token_viz(model_name, text):
    if not repo_exists(model_name):
        raise gr.Error(f"{model_name} is not a valid HF repo. Please enter a valid repo.")
    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=f"./.cache/hf/{model_name}")
    tokens = tokenizer.encode(text)
    # Escape '\n' so newlines stay visible in the highlighted output
    return [(tokenizer.decode(token).replace("\n", r"\n"), str(token)) for token in tokens]


MARKDOWN = """
<h1 style='text-align: center; margin-bottom: 1rem'>Token Visualizer ⚔️</h1>

Enter the tokenizer repo you want to use to visualize the tokens.

Example: to use the tokenizer from <https://huggingface.co/Qwen/Qwen2-72B-Instruct>, just enter **Qwen/Qwen2-72B-Instruct**.
"""


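# UI: a repo-name box and a text box feed token_viz; the result renders as highlighted
# spans, each decoded piece labeled with its token id.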
with gr.Blocks(analytics_enabled=False) as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        model_name = gr.Textbox(label="repo_name", interactive=True, placeholder="Enter the HF model here...")
        text = gr.Textbox(label="text", interactive=True, placeholder="Enter the text to be tokenized")
        output1 = gr.HighlightedText(show_inline_category=True)
        
    btn = gr.Button("Run")
    btn.click(token_viz, inputs=[model_name, text], outputs=[output1])
    
demo.queue().launch()
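# Run with `python app.py`; queue() enables request queuing so slow tokenizer downloads
# don't block other users. Gradio serves locally on http://127.0.0.1:7860 by default.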