# Text-Generator / app.py
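"""Gradio Space that generates text via the Pollinations text API.

The app queries the API for its list of available models, lets the user pick
one, and sends the prompt plus optional sampling parameters to the
text-generation endpoint, showing the result as Markdown and as copyable
plain text.
"""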
import gradio as gr
import requests
import json
# Default models to fall back on when the API cannot be queried
DEFAULT_MODELS = [
    "openai",
    "mistral",
    "mistral-large",
    "claude-3.5-sonnet",
    "llama-3.3-70b",
    "gemini"
]

# Function to get available models from the Pollinations API
def get_available_models():
    try:
        response = requests.get("https://text.pollinations.ai/models", timeout=10)
        if response.status_code == 200:
            models_data = response.json()
            # Extract just the model names if the API returns a complex structure
            if isinstance(models_data, list):
                # If it's a list of strings, return it as is
                if all(isinstance(m, str) for m in models_data):
                    return models_data
                # If it's a list of dicts, extract the model names/ids only
                elif all(isinstance(m, dict) for m in models_data):
                    model_names = []
                    for m in models_data:
                        # Use the 'name' or 'id' field, ignore everything else
                        if 'name' in m and isinstance(m['name'], str):
                            model_names.append(m['name'])
                        elif 'id' in m and isinstance(m['id'], str):
                            model_names.append(m['id'])
                    return model_names if model_names else DEFAULT_MODELS
        # Fall back to the default list on any unexpected response
        return DEFAULT_MODELS
    except Exception:
        # Network or parsing failure: fall back to the default list
        return DEFAULT_MODELS
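# Illustrative (not guaranteed) use of the helper above:
#   models = get_available_models()
#   # -> e.g. ["openai", "mistral", ...] from the API, or DEFAULT_MODELS on failure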
# Function to generate text using the Pollinations API
def generate_text(prompt, model, seed, system, temperature, max_tokens, top_p):
    if not prompt:
        return "Please enter a prompt."
    try:
        # Prepare the API request (same format as the snippet this app is based on)
        url = "https://text.pollinations.ai/"
        # Build the query parameters
        params = {
            "model": model,
            "prompt": prompt,
        }
        # Add optional parameters if provided
        if seed is not None:
            params["seed"] = int(seed)
        if system:
            params["system"] = system
        if temperature is not None:
            params["temperature"] = temperature
        if max_tokens:
            params["max_tokens"] = int(max_tokens)
        if top_p is not None:
            params["top_p"] = top_p
        # Make the request
        response = requests.get(url, params=params, timeout=120)
        if response.status_code == 200:
            result_text = response.text
            # Try to parse as JSON for better formatting
            try:
                json_result = json.loads(result_text)
                return f"```json\n{json.dumps(json_result, indent=2)}\n```"
            except json.JSONDecodeError:
                # Return as plain text if the response is not JSON
                return result_text
        else:
            return f"Error: API returned status code {response.status_code}\n{response.text}"
    except Exception as e:
        return f"Error: {str(e)}"
# Get available models
available_models = get_available_models()
# Create Gradio interface
with gr.Blocks(title="Pollinations Text Generator") as demo:
    gr.Markdown(
        """
        # 🌸 Pollinations Text Generator
        Generate text using various AI models via the Pollinations API.
        Select a model and provide a prompt to get started!
        """
    )
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Enter your text prompt here...",
                lines=5
            )
            model_dropdown = gr.Dropdown(
                choices=available_models,
                label="Model",
                value=available_models[0] if available_models else "openai",
                info="Select the AI model to use for text generation"
            )
            with gr.Accordion("Advanced Settings", open=False):
                seed_input = gr.Number(
                    label="Seed (optional)",
                    value=None,
                    precision=0,
                    info="Random seed for reproducible results"
                )
                system_input = gr.Textbox(
                    label="System Prompt (optional)",
                    placeholder="Enter system instructions...",
                    lines=2,
                    info="System-level instructions for the model"
                )
                temperature_slider = gr.Slider(
                    minimum=0,
                    maximum=2,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                    info="Controls randomness (higher = more creative)"
                )
                max_tokens_slider = gr.Slider(
                    minimum=1,
                    maximum=2048,
                    value=512,
                    step=1,
                    label="Max Tokens",
                    info="Maximum length of the generated text"
                )
                top_p_slider = gr.Slider(
                    minimum=0,
                    maximum=1,
                    value=0.9,
                    step=0.05,
                    label="Top P",
                    info="Nucleus sampling parameter"
                )
            generate_btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            output_display = gr.Markdown(
                value="_Your generated text will appear here..._",
                label="Generated Text"
            )
            # Add a read-only textbox for easy copying
            with gr.Accordion("Copy Output (Plain Text)", open=False):
                output_copy = gr.Textbox(
                    label="Copyable Output",
                    lines=15,
                    show_copy_button=True,
                    interactive=False
                )
    gr.Markdown(
        """
        ### About
        This Space uses the [Pollinations API](https://github.com/pollinations/pollinations) for text generation.
        The API supports multiple models and is free to use.

        **Parameters:**
        - **Model**: Choose from available AI models
        - **Seed**: Set a random seed for reproducible outputs
        - **System**: Provide system-level instructions
        - **Temperature**: Control response creativity (0 = deterministic, 2 = very creative)
        - **Max Tokens**: Set maximum response length
        - **Top P**: Control diversity via nucleus sampling
        """
    )
    # Set up the generate button action
    def generate_and_display(prompt, model, seed, system, temp, max_tok, top_p):
        result = generate_text(prompt, model, seed, system, temp, max_tok, top_p)
        # Send the same result to both the Markdown display and the copyable textbox
        return result, result

    generate_btn.click(
        fn=generate_and_display,
        inputs=[
            prompt_input,
            model_dropdown,
            seed_input,
            system_input,
            temperature_slider,
            max_tokens_slider,
            top_p_slider
        ],
        outputs=[output_display, output_copy]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()