|
""" |
|
UI Components for Universal MCP Client - Fixed with optimal MCP guidance |
|
""" |
|
import gradio as gr |
|
from gradio import ChatMessage |
|
from typing import Tuple, List, Dict, Any |
|
import os |
|
import logging |
|
import traceback |
|
from openai import OpenAI |
|
|
|
from config import AppConfig, CUSTOM_CSS, HF_HUB_AVAILABLE |
|
from chat_handler import ChatHandler |
|
from server_manager import ServerManager |
|
from mcp_client import UniversalMCPClient |
|
|
|
|
|
# Optional Hugging Face Hub auth helpers; imported only when the hub package
# is installed (HF_HUB_AVAILABLE is computed in config).
# NOTE(review): login/logout/whoami/HfHubHTTPError are not referenced in this
# module — presumably kept for re-export or legacy callers; confirm before removing.
if HF_HUB_AVAILABLE:
    from huggingface_hub import login, logout, whoami
    from huggingface_hub.utils import HfHubHTTPError

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
|
|
class UIComponents: |
|
"""Manages Gradio UI components with improved MCP server management""" |
|
|
|
def __init__(self, mcp_client: UniversalMCPClient): |
|
self.mcp_client = mcp_client |
|
self.chat_handler = ChatHandler(mcp_client) |
|
self.server_manager = ServerManager(mcp_client) |
|
self.current_user = None |
|
|
|
def _initialize_default_servers(self): |
|
"""Initialize default MCP servers on app startup""" |
|
default_servers = [ |
|
("background removal", "ysharma/background-removal-mcp"), |
|
("text to video", "ysharma/ltx-video-distilled"), |
|
("text to speech", "ysharma/Kokoro-TTS-mcp-test"), |
|
("text to image", "ysharma/dalle-3-xl-lora-v2") |
|
] |
|
|
|
logger.info("🚀 Initializing default MCP servers...") |
|
|
|
for server_name, space_id in default_servers: |
|
try: |
|
status_msg, _ = self.server_manager.add_custom_server(server_name, space_id) |
|
if "✅" in status_msg: |
|
logger.info(f"✅ Added default server: {server_name}") |
|
else: |
|
logger.warning(f"⚠️ Failed to add default server {server_name}: {status_msg}") |
|
except Exception as e: |
|
logger.error(f"❌ Error adding default server {server_name}: {e}") |
|
|
|
logger.info(f"📊 Initialized {len(self.mcp_client.servers)} default servers") |
|
|
|
def create_interface(self) -> gr.Blocks: |
|
"""Create the main Gradio interface with improved layout""" |
|
with gr.Blocks( |
|
title="Universal MCP Client - HF Inference Powered", |
|
theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(), |
|
fill_height=True, |
|
css=CUSTOM_CSS |
|
) as demo: |
|
|
|
|
|
self._create_sidebar() |
|
|
|
|
|
chatbot = self._create_main_chat_area() |
|
|
|
|
|
self._setup_event_handlers(chatbot, demo) |
|
|
|
return demo |
|
|
|
def _create_sidebar(self): |
|
"""Create the sidebar with login, provider/model selection, and server management""" |
|
with gr.Sidebar(elem_id="main-sidebar"): |
|
gr.Markdown("# 🤗 chat.gradio.app") |
|
|
|
|
|
self._create_login_section() |
|
|
|
|
|
self._create_provider_model_selection() |
|
|
|
|
|
self._create_server_management_section() |
|
|
|
|
|
with gr.Accordion("📚 Guide & Info", open=False): |
|
gr.Markdown(""" |
|
## 🎯 How To Use |
|
1. **Login**: Login with your HuggingFace account for API access |
|
2. **Add MCP Servers**: Connect to various AI tools on 🤗Hub |
|
3. **Enable/Disable Servers**: Use checkboxes to control which servers are active |
|
4. **Chat**: Interact with GPT-OSS and use connected MCP Servers |
|
|
|
## 💭 Features |
|
- **GPT-OSS Models**: OpenAI's latest open-source reasoning models (128k context) |
|
- **MCP Integration**: Connect to thousands of AI apps on Hub via MCP protocol |
|
- **Multi-Provider**: Access via Cerebras, Fireworks, Together AI, and others |
|
- **Media Support**: Automatic embedding of media -- images, audio, and video etc |
|
""") |
|
|
|
def _create_login_section(self): |
|
"""Create HuggingFace OAuth login section""" |
|
with gr.Group(elem_classes="login-section"): |
|
gr.Markdown("## 🔑 Authentication", container=True) |
|
self.login_button = gr.LoginButton( |
|
value="Sign in with Hugging Face", |
|
size="sm" |
|
) |
|
self.login_status = gr.Markdown("⚪ Please sign in to access Inference Providers", container=True) |
|
|
|
def _create_provider_model_selection(self): |
|
"""Create provider and model selection dropdowns with defaults""" |
|
with gr.Group(elem_classes="provider-model-selection"): |
|
gr.Markdown("## 🚀 Inference Configuration", container=True) |
|
|
|
|
|
provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys()) |
|
self.provider_dropdown = gr.Dropdown( |
|
choices=provider_choices, |
|
label="🔧 Inference Provider", |
|
value="cerebras", |
|
info="Choose your preferred inference provider" |
|
) |
|
|
|
|
|
self.model_dropdown = gr.Dropdown( |
|
choices=[], |
|
label="🤖 Model", |
|
value=None, |
|
info="Select GPT OSS model variant" |
|
) |
|
|
|
|
|
self.api_status = gr.Markdown("⚪ Select provider and model to begin", container=True) |
|
|
|
def _create_server_management_section(self): |
|
"""Create the server management section with checkboxes and guidance""" |
|
with gr.Group(): |
|
gr.Markdown("## 🔧 MCP Servers", container=True) |
|
|
|
|
|
gr.Markdown(""" |
|
<div style="background: #f0f8ff; padding: 10px; border-radius: 5px; border-left: 3px solid #4169e1; margin-bottom: 10px;"> |
|
<strong>💡 Best Practice:</strong> For optimal performance, we recommend keeping |
|
<strong>3-6 MCP servers</strong> enabled at once. Too many servers can: |
|
• Increase context usage (reducing available tokens for conversation) |
|
• Potentially confuse the model when selecting tools |
|
• Slow down response times |
|
|
|
You can add more servers but selectively enable only the ones you need for your current task. |
|
</div> |
|
""", container=True) |
|
|
|
|
|
with gr.Row(): |
|
self.add_server_btn = gr.Button("Add MCP Server", variant="primary", size="sm") |
|
self.remove_all_btn = gr.Button("Remove All", variant="secondary", size="sm") |
|
|
|
|
|
self.save_server_btn = gr.Button("Save Server", variant="primary", size="sm", visible=False) |
|
|
|
|
|
from mcp_spaces_finder import _finder |
|
spaces = _finder.get_mcp_spaces() |
|
self.mcp_dropdown = gr.Dropdown( |
|
choices=spaces, |
|
label=f"**Available MCP Servers ({len(spaces)}**)", |
|
value=None, |
|
info="Choose from HuggingFace spaces", |
|
allow_custom_value=True, |
|
visible=False |
|
) |
|
|
|
self.server_name = gr.Textbox( |
|
label="Server Title", |
|
placeholder="e.g., Text to Image Generator", |
|
visible=False |
|
) |
|
|
|
|
|
self.server_checkboxes = gr.CheckboxGroup( |
|
label="Active Servers (Check to enable)", |
|
choices=[], |
|
value=[], |
|
info="✅ Enabled servers can be used | ⬜ Disabled servers are ignored" |
|
) |
|
|
|
self.add_server_output = gr.Markdown("", visible=False, container=True) |
|
|
|
def _create_main_chat_area(self) -> gr.Chatbot: |
|
"""Create the main chat area""" |
|
with gr.Column(elem_classes="main-content"): |
|
chatbot = gr.Chatbot( |
|
label="Universal MCP-Powered AI Assistant", |
|
show_label=False, |
|
type="messages", |
|
scale=1, |
|
show_copy_button=True, |
|
avatar_images=None, |
|
value=[ |
|
ChatMessage( |
|
role="assistant", |
|
content="""Welcome! I'm your MCP-powered AI assistant using OpenAI's GPT-OSS models via HuggingFace Inference Providers. |
|
|
|
🎉 **Pre-loaded MCP servers ready to use:** |
|
- **background removal** - Remove backgrounds from images |
|
- **text to video** - Generate videos from text descriptions |
|
- **text to speech** - Convert text to natural speech |
|
- **text to image** - Create images from text prompts |
|
|
|
You can start using these servers right away, add more servers, or remove them as needed. Try asking me to generate an image, create speech, or any other task!""" |
|
) |
|
] |
|
) |
|
|
|
|
|
|
|
with gr.Column(scale=0, elem_classes="input-area"): |
|
self.chat_input = gr.MultimodalTextbox( |
|
interactive=True, |
|
file_count="multiple", |
|
placeholder="Enter message or upload files...", |
|
show_label=False, |
|
sources=["upload", "microphone"], |
|
file_types=None |
|
) |
|
|
|
return chatbot |
|
|
|
    def _setup_event_handlers(self, chatbot: gr.Chatbot, demo: gr.Blocks):
        """Set up all event handlers"""

        def handle_oauth_profile(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None):
            # Gradio injects the OAuth profile/token automatically based on
            # these parameter type hints; both are None when signed out.
            if profile is None:
                return "⚪ Please sign in to access Inference Providers"

            logger.info(f"👤 OAuth profile received for user: {profile.name}")

            if token and token.token:
                logger.info("🔑 OAuth token received, updating HF client...")
                # Expose the token to any downstream code that reads HF_TOKEN.
                os.environ["HF_TOKEN"] = token.token
                try:
                    # Rebuild the OpenAI-compatible client against the HF
                    # router using the signed-in user's own token.
                    self.mcp_client.hf_client = OpenAI(
                        base_url="https://router.huggingface.co/v1",
                        api_key=token.token
                    )
                    logger.info("✅ HuggingFace Inference client updated with OAuth token")
                except Exception as e:
                    # Best-effort: a failed client rebuild still reports the
                    # user as signed in below.
                    logger.error(f"❌ Failed to update HF client: {e}")

            return f"✅ Signed in as: **{profile.name}**"

        def handle_provider_change(provider_id):
            # Returns (model-dropdown update, status message).
            if not provider_id:
                return gr.Dropdown(choices=[], value=None), "⚪ Select provider first"

            available_models = AppConfig.get_available_models_for_provider(provider_id)
            # (display name, value) pairs for the dropdown.
            model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]

            # Prefer the 120b variant when this provider offers it; otherwise
            # fall back to the provider's first model (or None if it has none).
            default_model = "openai/gpt-oss-120b" if "openai/gpt-oss-120b" in available_models else (available_models[0] if available_models else None)

            if default_model:
                model_info = AppConfig.AVAILABLE_MODELS.get(default_model, {})
                context_length = model_info.get("context_length", 128000)
                status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
            else:
                status_msg = "✅ Provider selected, please select a model"

            return (
                gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
                status_msg
            )

        def handle_model_change(provider_id, model_id):
            # Pushes the selection into the MCP client and reports readiness.
            if not provider_id or not model_id:
                return "⚪ Select both provider and model"

            self.mcp_client.set_model_and_provider(provider_id, model_id)

            model_info = AppConfig.AVAILABLE_MODELS.get(model_id, {})
            context_length = model_info.get("context_length", 128000)
            active_params = model_info.get("active_params", "N/A")

            # hf_client is only set after a successful OAuth login.
            if self.mcp_client.hf_client:
                return f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
            else:
                return "❌ Please login first"

        def submit_message(message, history):
            # `message` is a MultimodalTextbox payload: {"text": str, "files": [...]}.
            if message and (message.get("text", "").strip() or message.get("files", [])):
                # Normalize plain-dict history entries to ChatMessage objects
                # before handing off to the chat handler.
                converted_history = []
                for msg in history:
                    if isinstance(msg, dict):
                        converted_history.append(ChatMessage(
                            role=msg.get('role', 'assistant'),
                            content=msg.get('content', ''),
                            metadata=msg.get('metadata', None)
                        ))
                    else:
                        converted_history.append(msg)

                new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
                return new_history, cleared_input
            # Empty submission: keep history unchanged; the input is briefly
            # disabled here and re-enabled by the chained enable_input below.
            return history, gr.MultimodalTextbox(value=None, interactive=False)

        def enable_input():
            # Chained after submit to re-enable the input box.
            return gr.MultimodalTextbox(interactive=True)

        def show_add_server_fields():
            # Reveal the add-server form; disable "Add" until save completes.
            # Order matches outputs: [mcp_dropdown, server_name, add_server_btn, save_server_btn].
            return [
                gr.Dropdown(visible=True),
                gr.Textbox(visible=True),
                gr.Button(interactive=False),
                gr.Button(visible=True)
            ]

        def hide_add_server_fields():
            # Inverse of show_add_server_fields.
            # NOTE(review): not wired to any event below — handle_add_server
            # performs the hiding itself; confirm before removing.
            return [
                gr.Dropdown(visible=False, value=None),
                gr.Textbox(visible=False, value=""),
                gr.Button(interactive=True),
                gr.Button(visible=False)
            ]

        def handle_add_server(server_title, selected_space):
            # Validation failure: hide the form and surface an error message,
            # leaving the current server list untouched.
            if not server_title or not selected_space:
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown("❌ Please provide both server title and space selection", visible=True)
                ]

            try:
                status_msg, _ = self.server_manager.add_custom_server(server_title.strip(), selected_space)

                # Refresh the checkbox group from the client's current state.
                server_choices = list(self.mcp_client.servers.keys())
                enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]

                # Nudge the user back toward the recommended 3-6 enabled servers.
                warning_msg = ""
                if len(enabled_servers) > 6:
                    warning_msg = "\n\n⚠️ **Note:** You have more than 6 servers enabled. Consider disabling some for better performance."

                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=server_choices, value=enabled_servers),
                    gr.Markdown(status_msg + warning_msg, visible=True)
                ]

            except Exception as e:
                # Any failure still closes the form and re-enables "Add".
                logger.error(f"Error adding server: {e}")
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown(f"❌ Error: {str(e)}", visible=True)
                ]

        def handle_server_toggle(enabled_servers):
            """Handle enabling/disabling servers via checkboxes"""
            # Sync every known server's enabled flag with the checkbox state.
            for server_name in self.mcp_client.servers.keys():
                self.mcp_client.enable_server(server_name, server_name in enabled_servers)

            enabled_count = len(enabled_servers)

            # Status tiers mirror the 3-6 servers guidance shown in the sidebar.
            if enabled_count == 0:
                message = "ℹ️ No servers enabled - chatbot will use native capabilities only"
            elif enabled_count <= 6:
                message = f"✅ {enabled_count} server{'s' if enabled_count != 1 else ''} enabled - optimal configuration"
            else:
                message = f"⚠️ {enabled_count} servers enabled - consider reducing to 3-6 for better performance"

            return gr.Markdown(message, visible=True)

        def handle_remove_all():
            """Remove all MCP servers"""
            count = self.mcp_client.remove_all_servers()
            return [
                gr.CheckboxGroup(choices=[], value=[]),
                gr.Markdown(f"✅ Removed all {count} servers", visible=True)
            ]

        def initialize_defaults():
            """Initialize default servers and update UI on app load"""
            self._initialize_default_servers()

            server_choices = list(self.mcp_client.servers.keys())
            enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]

            return gr.CheckboxGroup(
                choices=server_choices,
                value=enabled_servers,
                label=f"Active Servers ({len(server_choices)} loaded)"
            )

        # On load: refresh login status (OAuth args injected via type hints).
        demo.load(
            fn=handle_oauth_profile,
            outputs=[self.login_status]
        )

        # On load: populate models for the default provider.
        demo.load(
            fn=lambda: handle_provider_change("cerebras"),
            outputs=[self.model_dropdown, self.api_status]
        )

        # On load: connect the default MCP servers and fill the checkbox group.
        demo.load(
            fn=initialize_defaults,
            outputs=[self.server_checkboxes]
        )

        self.provider_dropdown.change(
            handle_provider_change,
            inputs=[self.provider_dropdown],
            outputs=[self.model_dropdown, self.api_status]
        )

        self.model_dropdown.change(
            handle_model_change,
            inputs=[self.provider_dropdown, self.model_dropdown],
            outputs=[self.api_status]
        )

        # Submit clears/disables the input, then re-enables it when done.
        chat_submit = self.chat_input.submit(
            submit_message,
            inputs=[self.chat_input, chatbot],
            outputs=[chatbot, self.chat_input]
        )
        chat_submit.then(enable_input, None, [self.chat_input])

        self.add_server_btn.click(
            fn=show_add_server_fields,
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn]
        )

        self.save_server_btn.click(
            fn=handle_add_server,
            inputs=[self.server_name, self.mcp_dropdown],
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn, self.server_checkboxes, self.add_server_output]
        )

        self.server_checkboxes.change(
            handle_server_toggle,
            inputs=[self.server_checkboxes],
            outputs=[self.add_server_output]
        )

        self.remove_all_btn.click(
            handle_remove_all,
            outputs=[self.server_checkboxes, self.add_server_output]
        )
|
|