Hugging Face Spaces — runtime error log: the application below crashed at startup.
| import gradio as gr | |
| from openai import OpenAI | |
| import os | |
| import asyncio | |
| from contextlib import AsyncExitStack | |
| from mcp import ClientSession | |
| from mcp.client.streamable_http import streamablehttp_client | |
| cle_api = os.environ.get("CLE_API_MISTRAL") | |
| # Initialisation du client Mistral (API compatible OpenAI) | |
| clientLLM = OpenAI(api_key=cle_api, base_url="https://api.mistral.ai/v1") | |
| loop = asyncio.new_event_loop() | |
| asyncio.set_event_loop(loop) | |
class MCPClientWrapper:
    """Synchronous facade over an async MCP client session.

    Owns the streamable-HTTP transport, the MCP session, and the list of
    tool descriptors advertised by the server.  All async work runs on the
    module-level ``loop`` via blocking ``run_until_complete`` calls.
    """

    def __init__(self):
        self.session = None      # active mcp.ClientSession, set by _connect()
        self.exit_stack = None   # AsyncExitStack owning transport + session lifetimes
        self.tools = []          # dicts: {"name", "description", "input_schema"}

    def connect(self, server_url: str) -> str:
        """Blocking wrapper around :meth:`_connect` on the shared event loop."""
        return loop.run_until_complete(self._connect(server_url))

    async def _connect(self, server_url: str) -> str:
        """Open a streamable-HTTP MCP session and cache the server's tools.

        Returns a human-readable status string listing the available tools.
        """
        # Tear down any previous connection before opening a new one.
        if self.exit_stack:
            await self.exit_stack.aclose()
        self.exit_stack = AsyncExitStack()

        # BUG FIX: streamablehttp_client yields a 3-tuple
        # (read_stream, write_stream, get_session_id) in current MCP SDK
        # releases; unpacking into exactly two names raised
        # "ValueError: too many values to unpack".  Star-unpack so both the
        # 2-tuple (older SDKs) and 3-tuple shapes work.
        streams = await self.exit_stack.enter_async_context(
            streamablehttp_client(url=server_url)
        )
        read_stream, write_stream, *_ = streams
        self.http_read, self.http_write = read_stream, write_stream

        self.session = await self.exit_stack.enter_async_context(
            ClientSession(self.http_read, self.http_write)
        )
        await self.session.initialize()

        # Ask the server which tools it exposes and keep a plain-dict copy.
        tools_response = await self.session.list_tools()
        self.tools = [
            {
                "name": t.name,
                "description": t.description,
                "input_schema": t.inputSchema,
            }
            for t in tools_response.tools
        ]
        tool_names = [t["name"] for t in self.tools]
        return f"Connecté au MCP {server_url}. Outils disponibles : {', '.join(tool_names)}"
clientMCP = MCPClientWrapper()
# Connecting is a network operation that can fail (server down, wrong URL,
# protocol mismatch).  Don't let a failed MCP handshake crash the whole
# Space at import time — report it and launch the UI with an empty tool list.
try:
    print(clientMCP.connect("https://huggingface.co/spaces/HackathonCRA/mcp"))
    print(clientMCP.tools)
except Exception as exc:  # top-level startup boundary: log and continue
    print(f"MCP connection failed: {exc!r}")
# Chatbot
def chatbot(message, history):
    """Gradio callback: append the user message, query Mistral, return history.

    Args:
        message: new user message (str) from the textbox.
        history: list of (user_text, bot_text) pairs held by gr.Chatbot.

    Returns:
        (history, history) — duplicated because the submit handler wires the
        same Chatbot component to both outputs.
    """
    # Rebuild the full conversation in OpenAI chat format.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # BUG FIX: the raw MCP descriptors ({"name","description","input_schema"})
    # are not what the OpenAI-compatible API accepts.  Tools must be shaped
    # as {"type": "function", "function": {name, description, parameters}}.
    tools = [
        {
            "type": "function",
            "function": {
                "name": t["name"],
                "description": t["description"] or "",
                "parameters": t["input_schema"],
            },
        }
        for t in clientMCP.tools
    ]

    # Only send `tools` when there is at least one — some backends reject
    # an empty tools list.
    kwargs = {"model": "mistral-small-latest", "messages": messages}
    if tools:
        kwargs["tools"] = tools
    response = clientLLM.chat.completions.create(**kwargs)

    # BUG FIX: message.content is None when the model answers with a tool
    # call instead of text; calling .strip() on it raised AttributeError.
    choice_msg = response.choices[0].message
    bot_reply = (choice_msg.content or "").strip()
    if not bot_reply and getattr(choice_msg, "tool_calls", None):
        names = ", ".join(tc.function.name for tc in choice_msg.tool_calls)
        bot_reply = f"[appel d'outil demandé : {names}]"

    history.append(("Vous: " + message, "Bot: " + bot_reply))
    return history, history
| with gr.Blocks() as demo: | |
| chatbot_ui = gr.Chatbot(label="ChatBot") | |
| msg = gr.Textbox(placeholder="Écrivez un message...") | |
| msg.submit(chatbot, [msg, chatbot_ui], [chatbot_ui, chatbot_ui]) | |
| demo.launch(debug=True) | |