|
|
|
import os |
|
import time |
|
import gradio as gr |
|
from openai import OpenAI |
|
import google.generativeai as genai |
|
|
|
|
|
|
|
|
|
# Secrets read from environment variables (intended to be set as
# Hugging Face Space secrets).
# NOTE(review): "PWD" collides with the shell's working-directory variable
# on POSIX systems, so os.getenv("PWD") may pick up a path instead of the
# intended password — consider renaming the secret. TODO confirm.
SECRET_PASSWORD = os.getenv("PWD")

OPENAI_API_KEY = os.getenv('API')

GOOGLE_API_KEY = os.getenv("API_GOOGLE")


# System prompt sent with every conversation (both providers).
SYSTEM_PROMPT = "You are a helpful AI assistant. Your answer should be as detailed as possible."

# Models offered in the UI; ask_llm routes by name prefix
# ("gpt-"/"o…" -> OpenAI, "gemini-" -> Google).
MODEL_CHOICES = ["gpt-4.1", "o4-mini", "gemini-2.5-pro"]
|
|
|
|
|
|
|
|
|
# Fail fast at import time if required secrets are missing.
# The Google key is optional: only the Gemini model becomes unavailable.
if not SECRET_PASSWORD:
    raise ValueError("Secret 'PWD' not found. Set it in the Hugging Face Space secrets.")

if not OPENAI_API_KEY:
    raise ValueError("Secret 'API' (for OpenAI) not found. Set it in the Hugging Face Space secrets.")

if not GOOGLE_API_KEY:
    # Non-fatal: the app still runs, Gemini requests will fail in ask_llm.
    print("Warning: 'API_GOOGLE' not found. The Gemini model will be unavailable.")
|
|
|
|
|
# Module-level OpenAI client; the key was validated above.
client = OpenAI(api_key=OPENAI_API_KEY)


# Configure the Google generative-AI SDK only when a key is present.
# A configuration failure is logged but deliberately does not stop the app
# (the OpenAI models remain usable).
if GOOGLE_API_KEY:
    try:
        genai.configure(api_key=GOOGLE_API_KEY)
    except Exception as e:
        print(f"Could not configure Google AI client: {e}")
|
|
|
|
|
# Custom CSS injected into the Gradio app: hides the default Gradio footer.
CSS = """
footer {visibility: hidden}
"""
|
|
|
|
|
|
|
def check_pw(pw):
    """Validate the entered password and toggle the two app pages.

    Args:
        pw: The password typed by the user.

    Returns:
        A 3-tuple for Gradio outputs: (status message, update for the
        login column, update for the main-app column).
    """
    import hmac  # local import: only needed for the comparison below

    # hmac.compare_digest avoids leaking password length/content through
    # comparison timing. Compare as bytes so non-ASCII input can't raise.
    # (The status emojis below were reconstructed from mojibake in the
    # original source — originally "✅"/"🚫"-style markers.)
    if pw and hmac.compare_digest(pw.encode("utf-8"), SECRET_PASSWORD.encode("utf-8")):
        # Hide the login page, reveal the main app.
        return ("✅ Welcome!", gr.update(visible=False), gr.update(visible=True))
    else:
        # Fixed delay to slow down brute-force guessing.
        time.sleep(1)
        return ("🚫 Wrong password.", gr.update(visible=True), gr.update(visible=False))
|
|
|
def add_user_message(user_message, history):
    """Append the user's turn to the chat history and clear the input box.

    Args:
        user_message: Text the user just submitted.
        history: Existing chat history in OpenAI message format.

    Returns:
        ("", new history list) — the empty string resets the textbox.
    """
    updated_history = [*history, {"role": "user", "content": user_message}]
    return "", updated_history
|
|
|
def generate_bot_response(history, model_name):
    """Call the selected LLM and stream its reply into the chat history.

    Appends an empty assistant message to ``history`` (mutated in place),
    then yields the history after each character to give the Gradio
    chatbot a typing animation.

    Args:
        history: Chat history in OpenAI message format (mutated in place).
        model_name: One of MODEL_CHOICES; routing happens in ask_llm.

    Yields:
        The growing history list after each appended character.
    """
    # Guard against a None reply (e.g. an SDK returning no message content)
    # which would otherwise raise TypeError in the loop below.
    response_text = ask_llm(history, model_name) or ""
    history.append({"role": "assistant", "content": ""})
    # Show the empty assistant bubble immediately, before streaming starts.
    yield history

    for char in response_text:
        history[-1]["content"] += char
        time.sleep(0.01)  # small delay so the streaming is visible
        yield history
|
|
|
def ask_llm(history, model_name):
    """Route the chat history to the right provider and return the reply text.

    Dispatches by model-name prefix: "gpt-"/"o…" goes to OpenAI,
    "gemini-" to Google. Any failure is caught and converted into a
    user-visible error string so the UI never crashes.

    Args:
        history: Chat history in OpenAI message format; the last entry is
            the newest user message.
        model_name: One of MODEL_CHOICES.

    Returns:
        The model's reply text, or a Markdown-formatted error message.
    """
    try:
        if model_name.startswith(('gpt-', 'o')):
            # OpenAI: prepend the system prompt to the full history.
            messages_to_send = [{"role": "system", "content": SYSTEM_PROMPT}] + history
            response = client.chat.completions.create(
                model=model_name, messages=messages_to_send, stream=False
            )
            return response.choices[0].message.content
        elif model_name.startswith('gemini-'):
            if not GOOGLE_API_KEY:
                raise ValueError("Google API Key is not configured.")

            if not history:
                return "Please start the conversation."

            gemini_model = genai.GenerativeModel(
                model_name=model_name, system_instruction=SYSTEM_PROMPT
            )

            # Gemini separates prior turns from the new user message:
            # everything but the last entry becomes the chat history.
            gemini_history = convert_history_to_gemini(history[:-1])
            user_message = history[-1]["content"]

            chat = gemini_model.start_chat(history=gemini_history)
            response = chat.send_message(user_message)
            return response.text
        else:
            raise ValueError(f"Unknown model provider for model: {model_name}")
    except Exception as e:
        # Log server-side and surface the failure in the chat UI.
        # (The "❌" marker restores a mojibake-garbled emoji from the
        # original source.)
        print(f"Error calling API for model {model_name}: {e}")
        return f"❌ **Error:** Could not get a response from `{model_name}`. Please check the API key and server logs.\n\n_Details: {e}_"
|
|
|
def convert_history_to_gemini(openai_history):
    """Translate OpenAI-style messages into Gemini's role/parts format.

    Only "user" and "assistant" turns are kept (system messages are
    dropped); "assistant" is renamed to Gemini's "model" role.

    Args:
        openai_history: List of {"role": ..., "content": ...} dicts.

    Returns:
        List of {"role": "user"|"model", "parts": [content]} dicts.
    """
    converted = []
    for message in openai_history:
        role = message["role"]
        if role not in ("user", "assistant"):
            continue  # skip system or any other roles
        gemini_role = "model" if role == "assistant" else "user"
        converted.append({"role": gemini_role, "parts": [message["content"]]})
    return converted
|
|
|
|
|
|
|
# UI layout: a visible login column and a hidden main app; check_pw swaps
# their visibility on successful login.
with gr.Blocks(css=CSS, title='Hello, Yoda.') as demo:

    # --- Login page (shown first) ---
    with gr.Column(elem_id="login_page", visible=True) as login_page:
        # "🔒" restores a mojibake-garbled emoji from the original source.
        gr.Markdown("## 🔒 Please log in to access this App")
        pw = gr.Textbox(label="Password", type="password", placeholder="Enter the password")
        btn_login = gr.Button("Log in")
        msg_login = gr.Textbox(value="", interactive=False, show_label=False, container=False)

    # --- Main chat app (revealed after a successful login) ---
    with gr.Column(elem_id="main_app", visible=False) as main_app:
        gr.Markdown("<h1><center>Hello, Yoda.</center></h1>")
        model_selector = gr.Radio(
            choices=MODEL_CHOICES, value=MODEL_CHOICES[0], label="Choose a Model"
        )

        chatbot = gr.Chatbot(
            elem_id="chatbot",
            label="Chatbot",
            bubble_full_width=False,
            height=600,
            type="messages"  # history uses OpenAI-style role/content dicts
        )

        with gr.Row(elem_id="input_row"):
            msg = gr.Textbox(
                elem_id="msg_textbox",
                show_label=False,
                placeholder="Ask me a question... (Shift+Enter for new line, Enter for send)",
                scale=4,
                lines=1,
                autofocus=True
            )
            submit_btn = gr.Button("Submit", scale=1, variant="primary")

        clear_btn = gr.ClearButton([msg, chatbot])

    # Login flow: both the button click and Enter in the password box.
    btn_login.click(check_pw, pw, [msg_login, login_page, main_app])
    pw.submit(check_pw, pw, [msg_login, login_page, main_app])

    # Chat flow: append the user turn instantly (queue=False), then stream
    # the assistant reply via the generator.
    msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        generate_bot_response, [chatbot, model_selector], chatbot
    )
    submit_btn.click(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        generate_bot_response, [chatbot, model_selector], chatbot
    )
|
|
|
|
|
|
|
# Launch the Gradio server only when run as a script (not on import).
if __name__ == '__main__':
    demo.launch()