import numpy as np
import matplotlib.pyplot as plt
import gradio as gr
description = """## Token Probability Distribution Explorer
This interactive tool lets you visualize how different parameters affect the probability distribution of tokens.
- **Temperature**: Controls the randomness of predictions. Higher values (e.g., 2.0) make the distribution more uniform, while lower values (e.g., 0.1) make it peakier.
- **Top-k**: Limits the number of most likely tokens to consider. For example, `top_k=5` means only the top 5 tokens are considered, and others are set to zero probability.
- **Top-p (nucleus sampling)**: Limits the tokens to those whose cumulative probability mass is below a certain threshold. For instance, `top_p=0.9` means only tokens contributing to the top 90% of probability are considered.
Adjust the sliders to see how each parameter influences the token probabilities. All tokens will always have some non-zero probability in the initial distribution.
To learn more about LLM generation, check out the early release of [Hands-On Generative AI with Transformers and Diffusion Models](https://learning.oreilly.com/library/view/hands-on-generative-ai/9781098149239/).
"""
def get_initial_distribution(num_tokens=10, min_prob=1e-3, seed=42):
np.random.seed(seed) # For reproducibility
# Ensure each token has at least `min_prob`
baseline_probs = np.full(num_tokens, min_prob)
remaining_prob = 1.0 - num_tokens * min_prob
# Distribute the remaining probability randomly
if remaining_prob > 0:
random_probs = np.random.rand(num_tokens)
random_probs /= np.sum(random_probs) # Normalize to sum to 1
token_probs = baseline_probs + remaining_prob * random_probs
else:
        # If num_tokens * min_prob >= 1, fall back to a uniform distribution
token_probs = baseline_probs
token_probs /= np.sum(token_probs)
return token_probs
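# A quick sanity check for the helper above (kept as comments so startup
# behavior is unchanged):
#   probs = get_initial_distribution()
#   assert np.isclose(probs.sum(), 1.0)  # normalized
#   assert probs.min() >= 1e-3           # every token keeps at least min_prob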
def adjust_distribution(temperature, top_k, top_p, initial_probs):
if temperature == 0:
# Greedy sampling: pick the token with the highest probability
max_index = np.argmax(initial_probs)
token_probs = np.zeros_like(initial_probs)
token_probs[max_index] = 1.0
else:
# Apply temperature scaling
token_probs = np.exp(np.log(initial_probs) / temperature)
token_probs /= np.sum(token_probs)
# Apply Top-K filtering
if top_k > 0:
top_k_indices = np.argsort(token_probs)[-top_k:]
top_k_probs = np.zeros_like(token_probs)
top_k_probs[top_k_indices] = token_probs[top_k_indices]
top_k_probs /= np.sum(top_k_probs) # Normalize after filtering
token_probs = top_k_probs
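    # Worked top-k example (illustrative numbers): with probabilities
    # [0.5, 0.3, 0.15, 0.05] and top_k=2, only the two largest survive;
    # renormalizing gives [0.5, 0.3] / 0.8 -> [0.625, 0.375].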
# Apply top_p (nucleus) filtering
if top_p < 1.0:
# Sort probabilities in descending order and compute cumulative sum
sorted_indices = np.argsort(token_probs)[::-1]
cumulative_probs = np.cumsum(token_probs[sorted_indices])
# Find the cutoff index for nucleus sampling
cutoff_index = np.searchsorted(cumulative_probs, top_p) + 1
# Get the indices that meet the threshold
top_p_indices = sorted_indices[:cutoff_index]
top_p_probs = np.zeros_like(token_probs)
top_p_probs[top_p_indices] = token_probs[top_p_indices]
top_p_probs /= np.sum(top_p_probs) # Normalize after filtering
token_probs = top_p_probs
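    # Worked top-p example (illustrative numbers): with probabilities
    # [0.5, 0.3, 0.15, 0.05] and top_p=0.9, the cumulative sums are
    # [0.5, 0.8, 0.95, 1.0]; searchsorted returns 2, so cutoff_index = 3 and
    # the first three tokens are kept: [0.5, 0.3, 0.15] / 0.95 -> ~[0.526, 0.316, 0.158].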
    # Plot the adjusted distribution
    num_tokens = len(token_probs)
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(range(num_tokens), token_probs, tick_label=[f'Token {i}' for i in range(num_tokens)])
    ax.set_xlabel('Tokens')
    ax.set_ylabel('Probability')
    ax.set_title('Token Probability Distribution')
    ax.set_ylim(0, 1)
    ax.grid(True)
    fig.tight_layout()
    plt.close(fig)  # Prevent figures from accumulating across live updates
    return fig
initial_probs = get_initial_distribution()
def update_plot(temperature=1.0, top_k=8, top_p=0.9):
return adjust_distribution(temperature, top_k, top_p, initial_probs)
# Generate an initial plot with default values
initial_plot = update_plot()
interface = gr.Interface(
fn=update_plot,
inputs=[
gr.Slider(0, 5.0, step=0.1, value=1.0, label="Temperature"),
gr.Slider(0, 10, step=1, value=8, label="Top-k"),
gr.Slider(0.0, 1.0, step=0.01, value=0.9, label="Top-p"),
],
outputs=gr.Plot(value=initial_plot, label="Token Probability Distribution"),
live=True,
title="Explore generation parameters of LLMs",
description=description,
)
interface.launch()