import html

import streamlit as st
from transformers import AutoTokenizer

st.set_page_config(layout="wide", page_title="Text Tokenizer")
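
# Paste text below; the app encodes it with the selected Hugging Face tokenizer
# and renders each token's contribution to the decoded text in its own color.
# Run with: streamlit run app.py  (app.py is an assumed file name)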

def get_random_color(token_id):
    # Map the token id to a hue so a given token keeps the same color
    # for the lifetime of the interpreter (hash() of a str is seeded per run)
    hue = (hash(str(token_id)) % 1000) / 1000.0
    return f"hsla({int(hue * 360)}, 70%, 30%, 70%)"

def load_tokenizer(model_name="Qwen/Qwen2.5-Coder-7B-Instruct"):
    # Cache the tokenizer in session state so it is only downloaded and instantiated once per session
    if 'tokenizer' not in st.session_state:
        st.session_state.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    return st.session_state.tokenizer
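
# Note: the tokenizer above is cached under the fixed key 'tokenizer', so switching to a
# different model within one session would require clearing st.session_state first.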
st.title("Text Tokenizer")
selected_model = "Qwen/Qwen2.5-Coder-7B-Instruct"

# Load the tokenizer for the selected model
try:
    tokenizer = load_tokenizer(selected_model)
    st.success(f"Loaded tokenizer: {selected_model}")
except Exception as e:
    st.error(f"Failed to load tokenizer: {e}")
    st.stop()
# Input text area
input_text = st.text_area("Enter text to tokenize", height=200)

# Tokenize button
if st.button("Tokenize") and input_text:
    tokens = tokenizer.encode(input_text)
    st.write(f"Total tokens: {len(tokens)}")

    # Generate colored text visualization
    result = ""
    prev_tokens = []
    prev_string = ""
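    # Decode the growing prefix and diff against the previous decode: this attributes
    # to each token exactly the text it adds, which stays correct even when a single
    # token (e.g. part of a multi-byte character) would not decode cleanly on its own.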
    for token in tokens:
        color = get_random_color(token)
        current_string = tokenizer.decode(prev_tokens + [token])
        prev_tokens.append(token)
        current_delta = current_string[len(prev_string):]
        prev_string = current_string
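
        # Escape the fragment for HTML, then make whitespace explicit so each token's
        # background color also covers spaces, tabs, and newlines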
        current_delta = html.escape(current_delta)
        current_delta = (current_delta
                         .replace("\n", "↵<br/>")
                         .replace(" ", "&nbsp;")
                         .replace("\t", "&nbsp;" * 4))  # 4-character tab width is an assumption
        result += f'<span style="background-color: {color};">{current_delta}</span>'

    st.html(f'<pre style="background-color: #222; padding: 10px; font-family: Courier, monospace;">{result}</pre>')

    # Show raw tokens (optional)
    with st.expander("View raw tokens"):
        token_strings = [tokenizer.decode([t]) for t in tokens]
        for i, (token_id, token_str) in enumerate(zip(tokens, token_strings)):
            st.write(f"{i}: Token ID {token_id} → '{token_str}'")