import gradio as gr
import os
from openai import OpenAI

print("Gradio version:", gr.__version__)
client = OpenAI(api_key=os.getenv("openai"))

conversation_history = []
model_name = "gpt-3.5-turbo"
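
# Build the prompt from the system role plus recent history, call the Chat Completions
# API, and return (cleared input box, latest reply, full transcript).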
def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
    if not user_input.strip():
        return "", "*Bitte gib eine Nachricht ein.*", ""

    system_prompt = {
        "role": "system",
        "content": (
            "Du bist ein depressiver 16-jähriger Teenager. "
            "Bleibe durchgehend in deiner Rolle. "
            "Du weißt NICHT, dass du eine KI bist."
        )
    }
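
    # Keep only the last six messages of history so the prompt stays small.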
    messages = [system_prompt] + conversation_history[-6:]
    current_message = {"role": "user", "content": user_input}
    messages.append(current_message)
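
    # Call the OpenAI API; if the request fails, stay in character with a silent reaction.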
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            # The slider may deliver a float; the API expects an integer token limit.
            max_tokens=min(int(max_tokens), 500),
            temperature=temperature,
            top_p=top_p,
            frequency_penalty=0.1,
            presence_penalty=0.1
        )
        response_text = response.choices[0].message.content
    except Exception as e:
        print("API Error:", e)
        response_text = "*schweigt und starrt auf den Boden*"
    conversation_history.append(current_message)
    conversation_history.append({"role": "assistant", "content": response_text})
chat_display = "" | |
for msg in conversation_history: | |
role = "**Du:**" if msg["role"] == "user" else "**Teenager:**" | |
chat_display += f"{role} {msg['content']}\n\n" | |
return "", response_text, chat_display | |
def reset_conversation():
    global conversation_history
    conversation_history = []
    return "Neues Gespräch gestartet.", ""
def test_api_connection():
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": "Hi"}],
            max_tokens=10
        )
        return "✅ API Verbindung erfolgreich"
    except Exception as e:
        return f"❌ API Error: {str(e)}"
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Depression Training Simulator")
    gr.Markdown("**Übe realistische Gespräche mit einem 16-jährigen Teenager mit Depressionen.**")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Einstellungen")
            max_tokens = gr.Slider(50, 500, value=200, step=10, label="Max. Antwortlänge")
            temperature = gr.Slider(0.7, 1.3, value=1.0, step=0.1, label="Kreativität (Temperature)")
            top_p = gr.Slider(0.5, 1.0, value=0.9, step=0.05, label="Top-p (Fokus)")

            gr.Markdown("### 🔧 API Status")
            api_status = gr.Textbox(label="Status", value="")
            api_test_btn = gr.Button("API testen")

            gr.Markdown("### 🔄 Aktionen")
            reset_btn = gr.Button("Neues Gespräch")
        with gr.Column(scale=2):
            gr.Markdown("### 💬 Gespräch")
            user_input = gr.Textbox(label="Deine Nachricht", placeholder="Hallo, wie geht es dir heute?", lines=2)
            send_btn = gr.Button("📨 Senden")
            bot_response = gr.Textbox(label="Antwort", value="", lines=3)
            chat_history = gr.Textbox(label="Gesprächsverlauf", value="", lines=15)
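
    # Wire the send button, the textbox submit event, the reset button, and the API test
    # button to their handlers.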
    send_btn.click(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )
    user_input.submit(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )
    reset_btn.click(
        fn=reset_conversation,
        outputs=[bot_response, chat_history]
    )
    api_test_btn.click(
        fn=test_api_connection,
        outputs=[api_status]
    )
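

# Warn if the API key secret is missing, then launch the app.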
if __name__ == "__main__":
    if not os.getenv("openai"):
        print("❌ FEHLER: openai Umgebungsvariable ist nicht gesetzt!")
    else:
        print("✅ OpenAI API Key gefunden")
    demo.launch(share=False, debug=True, show_api=False)