| """ | |
| Gradio + OpenAI MCP Connector — Clean, Fast, Streaming, With File Upload | |
| """ | |
| import os | |
| import shutil | |
| import gradio as gr | |
| from openai import OpenAI | |
# ---------------------
# CONFIGURATION
# ---------------------
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
MCP_SERVER_URL = "https://mcp-1st-birthday-auto-deployer.hf.space/gradio_api/mcp/"

MODEL_FAST = "gpt-5-mini"  # for tool resolution
MODEL_STREAM = "gpt-5.1"   # for the final streaming reply

client = OpenAI(api_key=OPENAI_API_KEY)
SYSTEM_PROMPT = """
You are a fast MLOps assistant with access to remote MCP tools.
Use tools only when necessary.
Keep reasoning effort LOW for speed.
After tools run, summarize clearly and concisely.
"""
# ---------------------
# NATIVE MCP CONNECTOR
# ---------------------
TOOLS = [
    {
        "type": "mcp",
        "server_label": "deploy_tools",
        "server_url": MCP_SERVER_URL,
        # Without this, the Responses API emits approval requests instead of
        # executing tool calls automatically.
        "require_approval": "never",
        # transport auto-detected; the HF Space supports HTTP
    }
]
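# Note on output shapes (per the OpenAI Responses API MCP connector docs):
# tool activity appears in `response.output` as typed items such as
# `mcp_list_tools` (the discovered tool inventory) and `mcp_call` (one
# executed call, carrying `name`, `arguments`, and `output`); with approvals
# enabled, `mcp_approval_request` items appear instead. The chat handler
# below relies on the `mcp_call` shape.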
# ---------------------
# FILE UPLOAD HANDLER
# ---------------------
def handle_upload(file_obj, request: gr.Request):
    if file_obj is None:
        return None

    # With type="filepath", Gradio passes the upload as a plain string path.
    local_path = file_obj if isinstance(file_obj, str) else file_obj.name

    # Copy into /tmp so the file survives Gradio's temp-dir cleanup.
    stable_path = os.path.join("/tmp", os.path.basename(local_path))
    try:
        shutil.copy(local_path, stable_path)
        local_path = stable_path
    except Exception:
        pass  # fall back to the original temp path

    # Build a public Gradio URL that remote MCP tools can fetch.
    base = str(request.base_url).rstrip("/")
    return f"{base}/gradio_api/file={local_path}"
# ---------------------
# MAIN CHAT HANDLER
# ---------------------
def chat_send_stream(user_msg, history, file_url):
    # History is kept in OpenAI-style "messages" format (a list of dicts)
    if history is None:
        history = []

    # Append the user's message
    history.append({"role": "user", "content": user_msg})

    # Build the OpenAI message history
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages.extend(history)

    # Inject file context into the model input only; the chat UI keeps the
    # original message text
    final_user_msg = user_msg
    if file_url:
        final_user_msg = f"[Uploaded CSV file: {file_url}]\n\n{user_msg}"
    messages[-1] = {"role": "user", "content": final_user_msg}
    # ----------------------------------
    # PHASE 1 — TOOL RESOLUTION
    # ----------------------------------
    tool_phase = client.responses.create(
        model=MODEL_FAST,
        reasoning={"effort": "low"},
        tools=TOOLS,
        instructions=SYSTEM_PROMPT,
        input=messages,
    )

    # Collect human-readable feedback about any MCP calls the model made.
    # Executed calls surface as `mcp_call` items whose result is in `.output`.
    tool_feedback = []
    for item in tool_phase.output or []:
        if item.type == "mcp_call":
            tool_feedback.append(f"🛠️ Used tool `{item.name}`.")
            if item.output:
                tool_feedback.append(str(item.output))
    if not tool_feedback:
        tool_feedback.append("No MCP tools needed.")

    # Show the tool feedback in the chat while the final answer is prepared
    history.append({"role": "assistant", "content": "\n".join(tool_feedback)})
    yield history
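    # Hand-off note: phase 1 runs non-streaming because hosted MCP calls
    # resolve server-side within a single response; only the final summary
    # below is streamed token by token.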
    # ----------------------------------
    # PHASE 2 — STREAMING FINAL ANSWER
    # ----------------------------------
    final_msg = history[-1]["content"] + "\n\n"
    history[-1]["content"] = final_msg

    stream = client.responses.create(
        model=MODEL_STREAM,
        reasoning={"effort": "low"},
        instructions=SYSTEM_PROMPT,
        input=messages + [history[-1]],
        stream=True,
    )
    for ev in stream:
        if ev.type == "response.output_text.delta":
            final_msg += ev.delta
            history[-1]["content"] = final_msg
            yield history
        elif ev.type == "response.completed":
            break
    stream.close()
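# Because chat_send_stream is a generator, Gradio re-renders the Chatbot on
# every `yield`, which is what produces the smooth streaming effect.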
# ---------------------
# GRADIO UI
# ---------------------
with gr.Blocks(title="MCP + GPT-5 — Fast Streaming MLOps Agent") as demo:
    gr.Markdown("""
    # 🚀 AI-Driven MLOps Agent (MCP-Powered)
    - Upload a CSV file
    - Tools resolve instantly
    - Final answer streams smoothly
    """)

    file_state = gr.State()
    uploader = gr.File(label="Upload CSV file", type="filepath", file_count="single")
    uploader.change(handle_upload, inputs=[uploader], outputs=[file_state])

    # type="messages" matches the {"role": ..., "content": ...} dicts the
    # handler yields
    chatbot = gr.Chatbot(label="Chat", type="messages")
    msg = gr.Textbox(label="Message")
    send = gr.Button("Send")

    send.click(
        chat_send_stream,
        inputs=[msg, chatbot, file_state],
        outputs=[chatbot],
    ).then(lambda: "", outputs=[msg])
    msg.submit(
        chat_send_stream,
        inputs=[msg, chatbot, file_state],
        outputs=[chatbot],
    ).then(lambda: "", outputs=[msg])

if __name__ == "__main__":
    demo.queue().launch(
        allowed_paths=["/tmp"],  # lets Gradio serve the copied uploads
        show_error=True,
        quiet=True,
    )
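# To run locally (a sketch; assumes this file is saved as app.py):
#   export OPENAI_API_KEY=sk-...
#   python app.py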