Spaces:
Sleeping
Sleeping
File size: 3,720 Bytes
ef9f0e5 9b5686a 6da9c1b ef9f0e5 6da9c1b ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 6da9c1b ef9f0e5 9b5686a ef9f0e5 6da9c1b ef9f0e5 6da9c1b 9b5686a ef9f0e5 6da9c1b ef9f0e5 6da9c1b ef9f0e5 9b5686a ef9f0e5 33517aa ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 6da9c1b 9b5686a ef9f0e5 6da9c1b ef9f0e5 6da9c1b 9b5686a ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 9b5686a ef9f0e5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 |
"""
OpenRouter Chatbot – elenco dinamico completo
Run: gradio app.py
"""
import os
import functools
import requests
import gradio as gr
# ------------------------------------------------------------------
# Configurazione
# ------------------------------------------------------------------
# SECURITY: the API key must come from the environment only. The previous
# version hard-coded a live key as the getenv() fallback, leaking the secret
# into source control — anyone reading this file could bill the account.
# With no key set, API calls fail with 401 and the UI falls back to its
# static model list.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
# ------------------------------------------------------------------
# Utility per ottenere l’elenco modelli
# ------------------------------------------------------------------
@functools.lru_cache(maxsize=1)
def _fetch_models_cached() -> tuple[str, ...]:
    """Fetch and cache the full model list from OpenRouter.

    Raises on any network/HTTP/parsing error so that failures are NOT
    cached. (Previously the fallback list itself got cached, so a single
    transient outage pinned the static fallback for the whole process
    lifetime.) Returns a tuple so the cached value is immutable.
    """
    resp = requests.get(
        "https://openrouter.ai/api/v1/models",
        headers={"Authorization": f"Bearer {OPENROUTER_API_KEY}"},
        timeout=15,
    )
    resp.raise_for_status()
    data = resp.json()
    return tuple(sorted(m["id"] for m in data["data"]))


def fetch_models() -> list[str]:
    """Return the sorted list of model ids offered by OpenRouter.

    Successful responses are cached for the lifetime of the process
    (refreshed only on container restart). On failure a Gradio warning
    is shown, a static fallback is returned, and the next call retries
    the request instead of serving a cached failure.
    """
    try:
        return list(_fetch_models_cached())
    except Exception as e:  # UI boundary: surface any failure as a warning
        gr.Warning(f"Impossibile scaricare l’elenco modelli: {e}")
        return ["openai/gpt-4-turbo"]  # static fallback
# ------------------------------------------------------------------
# Funzione di chiamata al modello
# ------------------------------------------------------------------
def chat_with_openrouter(prompt: str, model: str):
    """Send *prompt* to *model* through the OpenRouter chat-completions API.

    Returns the assistant's reply text on success, or a human-readable
    error string (never raises) so the UI always has something to show.
    """
    request_body = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 4096,
        "temperature": 0.7,
    }
    auth_headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
    }
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=auth_headers,
            json=request_body,
            timeout=60,
        )
        response.raise_for_status()
        # First choice carries the assistant message.
        return response.json()["choices"][0]["message"]["content"]
    except Exception as exc:
        return f"❌ Errore: {exc}"
# ------------------------------------------------------------------
# Interfaccia Gradio
# ------------------------------------------------------------------
def build_interface():
    """Assemble and return the Gradio Blocks UI for the chatbot."""
    available_models = fetch_models()
    with gr.Blocks(title="NotExistChatter – Tutti i modelli") as demo:
        gr.Markdown("🤖project Adam🤖")
        gr.Markdown("Il menù mostra **tutti** i modelli disponibili.")
        with gr.Row():
            model_dropdown = gr.Dropdown(
                choices=available_models,
                value=available_models[0] if available_models else None,
                label="Modello",
                allow_custom_value=False,
                interactive=True,
            )
        prompt_box = gr.Textbox(
            label="Prompt",
            placeholder="Scrivi qui il tuo messaggio...",
            lines=4,
            max_lines=10,
        )
        output_box = gr.Textbox(
            label="Risposta",
            interactive=False,
            lines=15,
            max_lines=20,
        )
        send_btn = gr.Button("Invia", variant="primary")
        # The button click and Enter in the prompt box trigger the
        # identical inference call.
        for trigger in (send_btn.click, prompt_box.submit):
            trigger(
                fn=chat_with_openrouter,
                inputs=[prompt_box, model_dropdown],
                outputs=output_box,
            )
    return demo
# Launch the interface when executed as a script.
if __name__ == "__main__":
    app = build_interface()
    app.launch()