malek-messaoudii committed
Commit 1315b27
Parent(s): 3c58a94
update mcp part

- main.py +15 -20
- routes/mcp_routes.py +4 -6
- services/mcp_service.py +12 -82
main.py
CHANGED
@@ -61,17 +61,15 @@ stance_model_manager = None
 kpa_model_manager = None
 try:
     from services.stance_model_manager import stance_model_manager
-    from services.label_model_manager import kpa_model_manager  #
+    from services.label_model_manager import kpa_model_manager  # Corrigé : kpa_model_manager partout
     logger.info("✓ Gestionnaires de modèles importés")
 except ImportError as e:
     logger.warning(f"⚠ Impossible d'importer les gestionnaires de modèles: {e}")
 
 # --- Vérification MCP ---
 MCP_ENABLED = False
-mcp_router = None
 try:
     from services.mcp_service import init_mcp_server
-    from routes.mcp_routes import router as mcp_router
     MCP_ENABLED = True
     logger.info("✓ Modules MCP détectés")
 except ImportError as e:
@@ -187,37 +185,37 @@ except ImportError as e:
 except Exception as e:
     logger.warning(f"⚠ Échec chargement route Voice Chat: {e}")
 
-# Main API Routes (KPA, Stance, etc.)
-api_router = None
+# Main API Routes (KPA, Stance, etc.)
 try:
-    from routes import api_router
+    from routes import api_router
     app.include_router(api_router, prefix="/api/v1")
     logger.info("✓ Routes API principales chargées")
 except ImportError as e:
     logger.warning(f"⚠ Routes API principales non trouvées: {e}")
-    # Fallback
+    # Fallback
     try:
-        from routes.label import router as kpa_router
+        from routes.label import router as kpa_router
         app.include_router(kpa_router, prefix="/api/v1/kpa", tags=["KPA"])
-        from routes.stance import router as stance_router
+        from routes.stance import router as stance_router
         app.include_router(stance_router, prefix="/api/v1/stance", tags=["Stance Detection"])
         logger.info("✓ Routes KPA et Stance chargées en fallback")
     except ImportError:
-        logger.warning("⚠ Fallback pour KPA/Stance échoué
+        logger.warning("⚠ Fallback pour KPA/Stance échoué")
 except Exception as e:
     logger.warning(f"⚠ Échec chargement routes API principales: {e}")
 
-# MCP Routes
-if MCP_ENABLED and mcp_router:
-    app.include_router(mcp_router, prefix="/api/v1", tags=["MCP"])
-    logger.info("✓ Routes MCP chargées")
+# MCP Routes - CORRIGÉ : Supprimé include_router (conflit double prefix) - mount gère tout
+# if MCP_ENABLED and mcp_router:  # Commenté pour éviter /mcp/mcp/...
+#     app.include_router(mcp_router, prefix="/api/v1", tags=["MCP"])
+#     logger.info("✓ Routes MCP chargées")
+if MCP_ENABLED:
+    logger.info("✓ MCP monté via lifespan (endpoints auto-gérés)")
 else:
-    logger.warning("⚠
+    logger.warning("⚠ MCP désactivé")
 
 # --- Basic routes ---
 @app.get("/health", tags=["Health"])
 async def health():
-    """Health check endpoint"""
     health_status = {
         "status": "healthy",
         "service": "NLP Debater + Groq Voice",
@@ -238,8 +236,7 @@ async def health():
 
 @app.get("/", tags=["Root"])
 async def root():
-
-    return RedirectResponse(url="/docs") # Modifié: Redirige auto vers Swagger pour interface interactive
+    return RedirectResponse(url="/docs")
 
 # --- Error handlers ---
 @app.exception_handler(404)
@@ -251,14 +248,12 @@ async def not_found_handler(request, exc):
         "POST /api/v1/tts/": "Text to speech",
         "POST /voice-chat/voice": "Voice chat"
     }
-
     if MCP_ENABLED:
         endpoints.update({
             "GET /api/v1/mcp/health": "Health check MCP",
             "GET /api/v1/mcp/tools": "Liste outils MCP",
             "POST /api/v1/mcp/tools/call": "Appel d'outil MCP"
         })
-
     return {
         "error": "Not Found",
         "message": f"URL {request.url} non trouvée",
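The hunk above drops the app.include_router call for the MCP router because that router already carries its own /mcp prefix; combined with the /api/v1 include prefix and the sub-application mounted at /api/v1/mcp, the same base path would be claimed twice. A minimal sketch of how FastAPI composes these prefixes (illustrative only, not code from this repository; mcp_asgi_app in the comment is a hypothetical placeholder):

# Illustrative only: how FastAPI composes router and include prefixes.
from fastapi import APIRouter, FastAPI

app = FastAPI()
router = APIRouter(prefix="/mcp")  # same prefix as routes/mcp_routes.py

@router.get("/health")
async def router_health():
    return {"via": "include_router"}

# Including the prefixed router under /api/v1 already yields /api/v1/mcp/health ...
app.include_router(router, prefix="/api/v1")

# ... so also mounting the MCP ASGI app at the same base path, e.g.
#     app.mount("/api/v1/mcp", mcp_asgi_app)   # hypothetical placeholder
# would put two handlers under /api/v1/mcp/*; the commit keeps only the mount.

print([route.path for route in app.routes])  # includes '/api/v1/mcp/health'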
routes/mcp_routes.py
CHANGED
@@ -1,15 +1,13 @@
-"""Routes pour exposer MCP via FastAPI"""
+"""Routes pour exposer MCP via FastAPI (fallback minimal)"""
 
 from fastapi import APIRouter
-from services.mcp_service import mcp_server
+from services.mcp_service import mcp_server
 
 router = APIRouter(prefix="/mcp", tags=["MCP"])
 
-# Endpoint simple pour health MCP (fallback si mounting direct ne suffit pas)
 @router.get("/health")
 async def mcp_health():
-    """Health check pour MCP"""
+    """Health check pour MCP (fallback)"""
     return {"status": "MCP ready", "tools": [t.name for t in mcp_server.tools]}
 
-# Note :
-# Pas besoin de Starlette ici pour éviter les conflits de path/double mounting
+# Note : Pas de mount ici – tout est géré par init_mcp_server(app.mount)
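Since main.py no longer includes this router, it only serves as a fallback. A hypothetical way to exercise the route in isolation with FastAPI's TestClient (not part of the commit; _FakeMCP is a stand-in for the real FastMCP instance, which the route only expects to expose .tools items with a .name attribute):

# Hypothetical stand-alone exercise of the fallback router; _FakeMCP is a stand-in,
# not the real FastMCP instance from services/mcp_service.py.
from dataclasses import dataclass
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient

@dataclass
class _Tool:
    name: str

class _FakeMCP:
    tools = [_Tool("detect_stance"), _Tool("match_keypoint_argument")]

mcp_server = _FakeMCP()
router = APIRouter(prefix="/mcp", tags=["MCP"])

@router.get("/health")
async def mcp_health():
    return {"status": "MCP ready", "tools": [t.name for t in mcp_server.tools]}

app = FastAPI()
app.include_router(router, prefix="/api/v1")  # only for this isolated check

client = TestClient(app)
print(client.get("/api/v1/mcp/health").json())
# {'status': 'MCP ready', 'tools': ['detect_stance', 'match_keypoint_argument']}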
services/mcp_service.py
CHANGED
@@ -4,40 +4,24 @@ from mcp.server.fastmcp import FastMCP
 from typing import Dict, Any
 import logging
 
-from fastapi import FastAPI
+from fastapi import FastAPI
 
 from services.stance_model_manager import stance_model_manager
-from services.label_model_manager import kpa_model_manager  # Corrigé :
+from services.label_model_manager import kpa_model_manager  # Corrigé : kpa_model_manager partout
 from services.stt_service import speech_to_text
 from services.tts_service import text_to_speech
 from services.chat_service import generate_chat_response
-# Note : Adapte les imports models si tes schémas sont dans un fichier unique (ex. models/schemas.py)
-# from models.stance import StanceRequest, StanceResponse  # Si séparé
-# from models.kpa import PredictionRequest, PredictionResponse  # Si séparé
-# Ou si un seul fichier models.py :
-# from models import StanceRequest, StanceResponse, PredictionRequest, PredictionResponse
 
 logger = logging.getLogger(__name__)
 
-# Créer l'instance FastMCP
-mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False)  # Stateful pour sessions
+# Créer l'instance FastMCP
+mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False)  # Stateful pour sessions
 
-#
+# Tools (inchangés, OK)
 @mcp_server.tool()
 def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
-    """
-    Détecte la stance (PRO/CON) d'un argument par rapport à un topic.
-
-    Args:
-        topic: Le sujet de débat (ex. "Assisted suicide should be a criminal offence")
-        argument: L'argument à classifier (ex. "People have the right to choose...")
-
-    Returns:
-        Dict avec predicted_stance, confidence, probabilities.
-    """
     if not stance_model_manager.model_loaded:
         raise ValueError("Modèle stance non chargé")
-
     result = stance_model_manager.predict(topic, argument)
     return {
         "predicted_stance": result["predicted_stance"],
@@ -46,22 +30,10 @@ def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
         "probability_pro": result["probability_pro"]
     }
 
-# Tool pour Key-Point Argument Matching (KPA)
 @mcp_server.tool()
 def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
-    """
-    Prédit si un argument matche un key-point (apparie/non_apparie).
-
-    Args:
-        argument: Texte de l'argument
-        key_point: Le key-point de référence
-
-    Returns:
-        Dict avec prediction (0/1), label, confidence, probabilities.
-    """
     if not kpa_model_manager.model_loaded:
         raise ValueError("Modèle KPA non chargé")
-
     result = kpa_model_manager.predict(argument, key_point)
     return {
         "prediction": result["prediction"],
@@ -70,77 +42,35 @@ def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
         "probabilities": result["probabilities"]
     }
 
-# Tool pour STT (Speech-to-Text) - Note : Pour audio, utilise un upload via resource ou adapte
 @mcp_server.tool()
 def transcribe_audio(audio_path: str) -> str:
-    """
-    Transcrit un fichier audio en texte (via Groq Whisper).
-
-    Args:
-        audio_path: Chemin vers le fichier audio (ex. temp file)
-
-    Returns:
-        Texte transcrit.
-    """
     return speech_to_text(audio_path)
 
-# Tool pour TTS (Text-to-Speech)
 @mcp_server.tool()
 def generate_speech(text: str, voice: str = "Aaliyah-PlayAI", format: str = "wav") -> str:
-    """
-    Génère un fichier audio à partir de texte (via Groq TTS).
-
-    Args:
-        text: Texte à synthétiser
-        voice: Voix (défaut: Aaliyah-PlayAI)
-        format: wav ou mp3
-
-    Returns:
-        Chemin vers le fichier audio généré.
-    """
     return text_to_speech(text, voice, format)
 
-# Tool pour Argument Generation (Chatbot)
 @mcp_server.tool()
 def generate_argument(user_input: str, conversation_id: str = None) -> str:
-    """
-    Génère une réponse argumentative via chatbot (via Groq Llama).
-
-    Args:
-        user_input: Input utilisateur
-        conversation_id: ID de session (optionnel)
-
-    Returns:
-        Réponse générée.
-    """
     return generate_chat_response(user_input, conversation_id)
 
-# Resource exemple : Prompt template pour débats (CORRIGÉ : Fonction sans paramètre pour matcher l'URI statique)
 @mcp_server.resource("debate://prompt")
 def get_debate_prompt() -> str:
-    """
-    Récupère un template de prompt générique pour générer des arguments sur un topic.
-    (Note: Pour des params dynamiques, utilise un tool comme detect_stance ou un URI avec {topic}).
-    """
     return "Tu es un expert en débat. Génère 3 arguments PRO pour le topic donné. Sois concis et persuasif."
 
-def init_mcp_server(app: FastAPI) -> None:
+def init_mcp_server(app: FastAPI) -> None:
     """
     Initialise et monte le serveur MCP sur l'app FastAPI.
-    Ajoute les routes MCP à /api/v1/mcp (ex. : /tools, /call, /resources).
     """
-    #
-
-
-    # Créer l'app MCP streamable (CORRIGÉ : Sans l'argument streamable_http_path invalide)
-    mcp_app = mcp_server.streamable_http_app()  # Pas de paramètre supplémentaire - le mounting gère le path
+    # CORRIGÉ : Utilise mcp_server.app (ASGI app standard) au lieu de streamable_http_app()
+    mcp_app = mcp_server.app  # L'ASGI app pour mounting clean (gère /health, /tools, etc. nativement)
 
-    #
-    app.mount("/api/v1/mcp", mcp_app)
+    # Monte à /api/v1/mcp - FastAPI gère le lifespan auto
+    app.mount("/api/v1/mcp", mcp_app)
 
-    logger.info("✓ Serveur MCP
+    logger.info("✓ Serveur MCP monté sur /api/v1/mcp avec tools NLP/STT/TTS")
 
-    #
+# Health tool optionnel (FastMCP a déjà /health)
 @mcp_server.tool()
 def health_check() -> Dict[str, Any]:
     return {"status": "healthy", "tools": list(mcp_server.tools.keys())}
|