import json
import os
import random
import sys

import gradio as gr
import requests
| |
|
| | |
| | |
| | |
| |
|
# --------------------------------------------------------------------------
# Runtime configuration, read once at import time from environment secrets.
# Fail fast with a clear error if a required secret is missing.
# --------------------------------------------------------------------------

# Model name sent with every upstream request.
MODEL = "gpt-4.1-mini"

# Upstream endpoint for completions; must be supplied via secrets.
API_URL = os.getenv("API_URL")
if not API_URL:
    raise RuntimeError("API_URL not set in Hugging Face Secrets")

# Kill switch. Accepts "true" / "1" / "yes" case-insensitively; this remains
# backward compatible with the previous exact match on the string "True".
DISABLED = os.getenv("DISABLED", "False").strip().lower() in ("true", "1", "yes")

# Comma-separated pool of API keys; one is chosen at random per request.
keys = os.getenv("OPENAI_API_KEYS")
if not keys:
    raise RuntimeError("OPENAI_API_KEYS not set in Hugging Face Secrets")

OPENAI_API_KEYS = [k.strip() for k in keys.split(",") if k.strip()]
if not OPENAI_API_KEYS:
    raise RuntimeError("OPENAI_API_KEYS is empty")

# Concurrency limit for the Gradio request queue (defaults to 2 workers).
NUM_THREADS = int(os.getenv("NUM_THREADS", "2"))

# Startup diagnostics -- deliberately never prints the secret values.
print("HF Space started")
print("API_URL loaded")
print("OPENAI_API_KEYS count:", len(OPENAI_API_KEYS))
print("NUM_THREADS:", NUM_THREADS)
|
def exception_handler(exception_type, exception, traceback):
    """Print a terse "ExceptionType: message" line for uncaught exceptions."""
    # Only the summary is shown; the traceback argument is intentionally unused.
    summary = f"{exception_type.__name__}: {exception}"
    print(summary)


# Install the compact handler globally and suppress traceback frames entirely.
sys.excepthook = exception_handler
sys.tracebacklimit = 0
| |
|
| | |
| | |
| | |
| |
|
def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request: gr.Request):
    """Send the user's text to the upstream API and yield the updated chat state.

    Yields exactly once: (messages, history, chat_counter, status_code,
    textbox_update, button_update). The two gr.update values re-enable the
    widgets that reset_textbox disabled before this handler ran.
    The `chatbot` and `request` parameters are part of the Gradio wiring and
    are not read here.
    """
    if not OPENAI_API_KEYS:
        raise RuntimeError("No OpenAI API keys available")

    # Spread load across the key pool: one randomly chosen key per request.
    api_key = random.choice(OPENAI_API_KEYS)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": MODEL,
        "input": inputs,
        "temperature": temperature,
        "top_p": top_p,
    }

    chat_counter += 1
    history.append({"role": "user", "content": inputs})

    status = None  # reported to the "Status code" textbox; None on transport error
    try:
        # Keep the try body to the operations that can actually raise:
        # the network call and response parsing.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        status = response.status_code
        print("Status code:", status)

        if status != 200:
            print("Response text:", response.text)
            output_text = f"Error: {status}"
        else:
            data = response.json()
            output_text = data["output"][0]["content"][0]["text"]
        # Record the assistant turn (answer OR error text) so it is actually
        # rendered in the chat; previously the error text was computed but
        # never appended to history, so users saw nothing on failure.
        history.append({"role": "assistant", "content": output_text})
    except Exception as e:
        # Best-effort: log and fall through so the UI widgets are re-enabled.
        print(f"API request error: {e}")

    messages = [{"role": msg["role"], "content": msg["content"]} for msg in history]
    # Yield the integer status code (not the Response object) for the textbox.
    yield messages, history, chat_counter, status, gr.update(interactive=True), gr.update(interactive=True)
| |
|
| | |
| | |
| | |
| |
|
def reset_textbox():
    """Clear the input box and lock both input widgets while a request runs."""
    cleared_box = gr.update(value="", interactive=False)
    locked_button = gr.update(interactive=False)
    return cleared_box, locked_button
| |
|
| | |
| | |
| | |
| |
|
# --------------------------------------------------------------------------
# UI construction. Order matters: the consent block is visible on load and
# the main chat column starts hidden; accepting the terms swaps the two.
# --------------------------------------------------------------------------

title = "<h1 align='center'>Hello welcome</h1>"
if DISABLED:
    # Kill-switch banner replaces the normal heading.
    title = """<h1 align="center" style="color:red">
This app has reached its usage limit. Please check back later.
</h1>"""

description = "kutti."

theme = gr.themes.Default(primary_hue="green")

with gr.Blocks(
    css="""
#col_container { margin-left: auto; margin-right: auto; }
#chatbot { height: 520px; overflow: auto; }
""",
    theme=theme,
) as demo:

    # Static header.
    gr.HTML(title)
    gr.HTML("<h3 align='center'>I'm Kutti</h3>")

    # Main chat UI -- hidden until the user accepts the consent terms below.
    with gr.Column(elem_id="col_container", visible=False) as main_block:
        chatbot = gr.Chatbot(elem_id="chatbot", type="messages")
        inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")
        state = gr.State([])  # conversation history, shared with predict()

        with gr.Row():
            with gr.Column(scale=7):
                # Send button; hidden entirely when the kill switch is on.
                b1 = gr.Button(visible=not DISABLED)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code")

        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider(0, 1.0, value=1.0, step=0.05, label="Top-p")
            temperature = gr.Slider(0, 5.0, value=1.0, step=0.1, label="Temperature")
            # Hidden counter carried through predict()'s state round-trip.
            chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Consent gate shown on load. The hidden checkbox receives the boolean
    # result of the browser confirm() so a Python callback can react to it.
    with gr.Column(elem_id="user_consent_container") as user_consent_block:
        accept_checkbox = gr.Checkbox(visible=False)
        js = "(x) => confirm('By clicking OK, you agree to data usage terms.')"

        with gr.Accordion("User Consent", open=True):
            gr.Markdown(
                "By using this app, you consent to data collection for research and security purposes."
            )
            accept_button = gr.Button("I Agree")

        def enable_inputs():
            # Hide the consent block and reveal the chat column.
            return gr.update(visible=False), gr.update(visible=True)

    # Clicking "I Agree" runs the JS confirm(); its result lands in
    # accept_checkbox, whose change event then swaps the visible blocks.
    accept_button.click(None, None, accept_checkbox, js=js, queue=False)
    accept_checkbox.change(
        fn=enable_inputs,
        inputs=[],
        outputs=[user_consent_block, main_block],
        queue=False,
    )

    # Enter in the textbox: first disable the widgets (unqueued, instant),
    # then run the queued predict call, which re-enables them when it yields.
    inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
    inputs.submit(
        predict,
        [inputs, top_p, temperature, chat_counter, chatbot, state],
        [chatbot, state, chat_counter, server_status_code, inputs, b1],
    )

    # Same two-step wiring for the send button.
    b1.click(reset_textbox, [], [inputs, b1], queue=False)
    b1.click(
        predict,
        [inputs, top_p, temperature, chat_counter, chatbot, state],
        [chatbot, state, chat_counter, server_status_code, inputs, b1],
    )

# Bounded request queue; NUM_THREADS concurrent workers, REST API disabled.
demo.queue(
    max_size=10,
    default_concurrency_limit=NUM_THREADS,
    api_open=False,
).launch(share=False)
| |
|