"""
OpenRouter Chatbot – elenco dinamico completo
Run: gradio app.py
"""

import os
import functools

import requests
import gradio as gr

# ------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------
# Read the API key from the environment (e.g. a repository secret on a
# Hugging Face Space); avoid hard-coding secrets in the source.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
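
# Sketch (assumption, not in the original app): failing fast when the key is
# missing gives a clearer error than a later 401 from the API:
#
#     if not OPENROUTER_API_KEY:
#         raise RuntimeError("Set the OPENROUTER_API_KEY environment variable.")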

# ------------------------------------------------------------------
# Utility to fetch the model list
# ------------------------------------------------------------------
@functools.lru_cache(maxsize=1)
def fetch_models() -> list[str]:
    """
    Return the full list of models offered by OpenRouter.
    The @lru_cache decorator caches the result for the lifetime of the
    process (it is refreshed only when the container restarts).
    """
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    try:
        resp = requests.get(
            "https://openrouter.ai/api/v1/models",
            headers=headers,
            timeout=15,
        )
        resp.raise_for_status()
        data = resp.json()
        # Each entry in data["data"] describes one model; keep the ids, sorted.
        models = sorted(m["id"] for m in data["data"])
        return models
    except Exception as e:
        gr.Warning(f"Could not download the model list: {e}")
        return ["openai/gpt-4-turbo"]  # static fallback

# ------------------------------------------------------------------
# Model call function
# ------------------------------------------------------------------
def chat_with_openrouter(prompt: str, model: str):
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 4096,
        "temperature": 0.7,
    }
    try:
        resp = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"❌ Error: {e}"

# ------------------------------------------------------------------
# Gradio interface
# ------------------------------------------------------------------
def build_interface():
    models = fetch_models()
    with gr.Blocks(title="NotExistChatter – All models") as demo:
        gr.Markdown("🤖project Adam🤖")
        gr.Markdown("The menu lists **all** available models.")
        with gr.Row():
            model_dropdown = gr.Dropdown(
                choices=models,
                value=models[0] if models else None,
                label="Model",
                allow_custom_value=False,
                interactive=True,
            )
        prompt_box = gr.Textbox(
            label="Prompt",
            placeholder="Type your message here...",
            lines=4,
            max_lines=10,
        )
        output_box = gr.Textbox(
            label="Response",
            interactive=False,
            lines=15,
            max_lines=20,
        )
        send_btn = gr.Button("Send", variant="primary")
        # Both the button click and Enter in the prompt box trigger the same call.
        send_btn.click(
            fn=chat_with_openrouter,
            inputs=[prompt_box, model_dropdown],
            outputs=output_box,
        )
        prompt_box.submit(
            fn=chat_with_openrouter,
            inputs=[prompt_box, model_dropdown],
            outputs=output_box,
        )
    return demo

# Launch the interface
if __name__ == "__main__":
    build_interface().launch()
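
# Sketch (assumptions): on a Hugging Face Space the bare launch() above is
# enough. For a self-hosted run, standard launch() parameters such as
# server_name and server_port could expose the app on the network, e.g.:
#
#     build_interface().launch(server_name="0.0.0.0", server_port=7860)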