import asyncio
import functools
import os
import time

import gradio as gr
from dotenv import load_dotenv

from prompt_enhancer import PromptEnhancer, get_available_models
# Load variables from a local .env file into the environment
# (a no-op when the file is absent, e.g. in production).
load_dotenv(encoding='utf-8')

# Hugging Face Spaces set SPACE_ID in the environment; use its presence
# to detect whether we are running inside a Space.
IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
# OpenRouter API key. The previous code branched on IS_HF_SPACE, but both
# branches made the identical call (os.getenv(k) is os.environ.get(k)):
# locally the key comes from .env via load_dotenv(), on HF Spaces it is
# injected as a real environment variable — one lookup covers both.
api_key = os.getenv("OPENROUTER_API_KEY")

if not api_key:
    # Warn but keep starting up, so the UI can still render.
    print("Warning: OPENROUTER_API_KEY not found!")

# Cache of raw model metadata dicts, populated by fetch_models().
available_models = []
async def fetch_models():
    """Fetch the model catalog from OpenRouter and cache it.

    Side effect: updates the module-level ``available_models`` cache with
    the raw metadata dicts.

    Returns:
        A list of display strings of the form ``"<id> - <name>"``; a static
        fallback list is returned when the API call (or parsing) fails.
    """
    global available_models
    try:
        fetched = await get_available_models()
        available_models = fetched
        display_names = []
        for entry in fetched:
            display_names.append(f"{entry['id']} - {entry.get('name', 'No name')}")
        return display_names
    except Exception as err:
        print(f"Error fetching models: {err}")
        # Best-effort fallback so the dropdown is never empty.
        return [
            "anthropic/claude-3-haiku - Claude 3 Haiku",
            "anthropic/claude-3-sonnet - Claude 3 Sonnet",
            "anthropic/claude-3-opus - Claude 3 Opus",
            "openai/gpt-4o - GPT-4o",
            "openai/gpt-4o-mini - GPT-4o Mini"
        ]
def get_model_id(model_display_name):
    """Return the model ID portion of a ``"<id> - <name>"`` display string.

    Strings that do not contain the ``" - "`` separator are returned
    unchanged (``partition`` yields the whole string as the head).
    """
    return model_display_name.partition(" - ")[0]
async def enhance_prompt(prompt: str, model_choice: str):
    """Enhance the prompt using the selected model.

    Args:
        prompt: The user's raw prompt text.
        model_choice: Dropdown display string of the form "<id> - <name>".

    Returns:
        A 3-tuple ``(advanced_prompt, expanded_prompt, stats)``; on failure
        the first element carries the error message and the rest are empty.
    """
    if not prompt.strip():
        # Guard: whitespace-only input is rejected before any API spend.
        return "Please enter a prompt to enhance.", "", ""

    start_time = time.time()

    model_id = get_model_id(model_choice)
    enhancer = PromptEnhancer(model_id)

    try:
        # Three sequential model calls: expand the raw input, suggest
        # enhancements for it, then decompose the *expanded* prompt.
        expanded_prompt = await enhancer.analyze_and_expand_input(prompt)

        suggested_enhancements = await enhancer.suggest_enhancements(prompt)

        decomposition_and_reasoning = await enhancer.decompose_and_add_reasoning(expanded_prompt)

        # NOTE(review): "decomposition_and_reasoninng" is misspelled, but
        # assemble_prompt() may look the key up by this exact name —
        # confirm against prompt_enhancer before renaming it.
        components = {
            "expanded_prompt": expanded_prompt,
            "decomposition_and_reasoninng": decomposition_and_reasoning,
            "suggested_enhancements": suggested_enhancements
        }

        advanced_prompt = await enhancer.assemble_prompt(components)

        elapsed_time = time.time() - start_time

        # Token counters are read off the enhancer — presumably accumulated
        # across the calls above; verify in PromptEnhancer.
        stats = f"""
Model: {model_id}
Processing Time: {elapsed_time:.2f} seconds
Prompt Tokens: {enhancer.prompt_tokens}
Completion Tokens: {enhancer.completion_tokens}
"""

        return advanced_prompt, expanded_prompt, stats

    except Exception as e:
        # Surface the error in the UI instead of crashing the handler.
        return f"Error: {str(e)}", "", ""
|
def run_async(fn):
    """Adapt a coroutine function into a sync callable for Gradio handlers.

    Each invocation runs the coroutine to completion on a fresh event loop
    via ``asyncio.run()``, so the returned callable must not be invoked
    from inside an already-running loop.

    Args:
        fn: A coroutine function (``async def``).

    Returns:
        A synchronous function with the same signature and — thanks to
        ``functools.wraps`` — the same ``__name__``/``__doc__`` metadata
        (the original wrapper discarded them, which hurts debugging and
        framework introspection).
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        return asyncio.run(fn(*args, **kwargs))

    return wrapper
|
async def create_ui():
    """Construct the Gradio Blocks UI and wire up its event handlers.

    Fetches the model list up-front so the dropdown is populated before
    the interface is rendered.

    Returns:
        The assembled ``gr.Blocks`` application (not yet launched).
    """
    model_choices = await fetch_models()
    # Fall back to a known model if the fetch returned nothing.
    default_model = model_choices[0] if model_choices else "anthropic/claude-3-haiku - Claude 3 Haiku"

    with gr.Blocks(title="Advanced Prompt Generator", theme=gr.themes.Soft()) as app:
        # NOTE(review): the "๐" characters look like mojibake from lost
        # emoji — confirm the intended glyphs before changing these strings.
        gr.Markdown("""
# ๐ Advanced Prompt Generator

Transform your basic prompts into highly optimized, structured prompts for better AI responses.

## How it works:
1. Enter your basic prompt
2. Select an AI model
3. Get an enhanced, structured prompt with decomposition and reasoning
""")

        with gr.Row():
            # Left column: prompt input, model selection and action buttons.
            with gr.Column(scale=3):
                prompt_input = gr.Textbox(
                    label="Enter Your Basic Prompt",
                    placeholder="E.g. Explain quantum computing",
                    lines=4
                )
                model_dropdown = gr.Dropdown(
                    choices=model_choices,
                    label="Select Model",
                    value=default_model
                )
                refresh_button = gr.Button("๐ Refresh Models")

                with gr.Row():
                    submit_button = gr.Button("๐ฎ Enhance Prompt", variant="primary")
                    clear_button = gr.Button("๐งน Clear")

            # Right column: tabbed output panes.
            with gr.Column(scale=4):
                with gr.Tabs():
                    with gr.TabItem("Enhanced Prompt"):
                        enhanced_output = gr.Textbox(
                            label="Enhanced Prompt",
                            placeholder="Your enhanced prompt will appear here...",
                            lines=15
                        )
                    with gr.TabItem("Expanded Prompt Only"):
                        expanded_output = gr.Textbox(
                            label="Expanded Prompt",
                            placeholder="Your expanded prompt will appear here...",
                            lines=15
                        )
                    with gr.TabItem("Stats"):
                        stats_output = gr.Textbox(
                            label="Processing Stats",
                            lines=5
                        )

        # Event wiring: the async handlers are bridged through run_async,
        # which runs each coroutine on its own event loop per click.
        refresh_button.click(
            fn=run_async(fetch_models),
            outputs=model_dropdown
        )

        submit_button.click(
            fn=run_async(enhance_prompt),
            inputs=[prompt_input, model_dropdown],
            outputs=[enhanced_output, expanded_output, stats_output]
        )

        # Reset all three output panes to empty strings.
        clear_button.click(
            fn=lambda: ("", "", ""),
            outputs=[enhanced_output, expanded_output, stats_output]
        )

    return app
|
if __name__ == "__main__":
    # Build the UI (this awaits the initial model fetch), then start the
    # Gradio server with debug output enabled.
    app = asyncio.run(create_ui())
    app.launch(debug=True)