malek-messaoudii
committed
Commit · bb2655d
Parent(s): 83c5f9d

delete mcp part

Files changed:
- mcp/n8n_routes.py +0 -422
- mcp/resources.py +0 -350
- mcp/run_mcp.py +0 -4
- mcp/server.py +0 -68
- mcp/tools.py +0 -223
- mcp/types.py +0 -353
mcp/n8n_routes.py
DELETED
@@ -1,422 +0,0 @@
"""
FastAPI routes for n8n integration with MCP
To be added to your main app.py
"""
from fastapi import APIRouter, HTTPException, BackgroundTasks, UploadFile, File
from pydantic import BaseModel
from typing import Dict, Any, Optional, List
import logging
from datetime import datetime

logger = logging.getLogger(__name__)

# Router for n8n
n8n_router = APIRouter(prefix="/n8n", tags=["n8n"])

# ==================== MODELS ====================

class N8NToolRequest(BaseModel):
    """Request model for n8n calls"""
    tool_name: str
    arguments: Dict[str, Any]
    context: Optional[Dict[str, Any]] = None
    async_callback: Optional[str] = None  # URL for asynchronous callback

    class Config:
        json_schema_extra = {
            "example": {
                "tool_name": "predict_stance",
                "arguments": {
                    "topic": "climate change",
                    "argument": "We need renewable energy"
                },
                "context": {
                    "session_id": "session_123",
                    "user_id": "user_456"
                }
            }
        }

class N8NBatchRequest(BaseModel):
    """Request for batch processing"""
    tool_name: str
    items: List[Dict[str, Any]]
    batch_size: int = 10
    parallel: bool = False

    class Config:
        json_schema_extra = {
            "example": {
                "tool_name": "predict_stance",
                "items": [
                    {"topic": "AI", "argument": "AI will help humanity"},
                    {"topic": "AI", "argument": "AI is dangerous"}
                ],
                "batch_size": 10
            }
        }

class N8NPipelineRequest(BaseModel):
    """Request for a multi-step pipeline"""
    pipeline_name: str
    input_data: Dict[str, Any]
    steps: List[Dict[str, Any]]

    class Config:
        json_schema_extra = {
            "example": {
                "pipeline_name": "debate_analysis",
                "input_data": {
                    "topic": "climate change",
                    "text": "We must act now"
                },
                "steps": [
                    {"tool": "predict_stance", "output_key": "stance"},
                    {"tool": "predict_kpa", "use_previous": True}
                ]
            }
        }

class N8NResponse(BaseModel):
    """Standardized response for n8n"""
    success: bool
    data: Optional[Dict[str, Any]] = None
    error: Optional[str] = None
    execution_time: float
    timestamp: datetime = datetime.now()

# ==================== ENDPOINTS ====================

@n8n_router.post("/execute", response_model=N8NResponse)
async def execute_tool(request: N8NToolRequest):
    """
    Main endpoint for executing an MCP tool from n8n
    """
    import time
    start_time = time.time()

    try:
        from mcp.server import MCPServer
        from mcp import server  # Import your MCP instance

        # Execute the tool
        result = await server.call_tool(
            tool_name=request.tool_name,
            arguments=request.arguments
        )

        # Attach the context if provided
        if request.context:
            result["context"] = request.context

        execution_time = time.time() - start_time

        return N8NResponse(
            success=True,
            data=result,
            execution_time=execution_time
        )

    except Exception as e:
        logger.error(f"Tool execution failed: {str(e)}")
        execution_time = time.time() - start_time

        return N8NResponse(
            success=False,
            error=str(e),
            execution_time=execution_time
        )

@n8n_router.post("/batch", response_model=N8NResponse)
async def batch_execute(request: N8NBatchRequest):
    """
    Endpoint for batch processing from n8n
    """
    import time
    import asyncio
    start_time = time.time()

    try:
        from mcp import server

        results = []

        # Sequential or parallel processing
        if request.parallel:
            # Parallel processing
            tasks = []
            for item in request.items:
                task = server.call_tool(
                    tool_name=request.tool_name,
                    arguments=item
                )
                tasks.append(task)

            results = await asyncio.gather(*tasks, return_exceptions=True)
        else:
            # Sequential processing in batches
            for i in range(0, len(request.items), request.batch_size):
                batch = request.items[i:i + request.batch_size]

                for item in batch:
                    try:
                        result = await server.call_tool(
                            tool_name=request.tool_name,
                            arguments=item
                        )
                        results.append(result)
                    except Exception as e:
                        results.append({"error": str(e), "item": item})

        execution_time = time.time() - start_time

        return N8NResponse(
            success=True,
            data={
                "results": results,
                "total": len(results),
                "successful": sum(1 for r in results if not isinstance(r, Exception) and "error" not in r),
                "failed": sum(1 for r in results if isinstance(r, Exception) or "error" in r)
            },
            execution_time=execution_time
        )

    except Exception as e:
        logger.error(f"Batch execution failed: {str(e)}")
        execution_time = time.time() - start_time

        return N8NResponse(
            success=False,
            error=str(e),
            execution_time=execution_time
        )

@n8n_router.post("/pipeline", response_model=N8NResponse)
async def execute_pipeline(request: N8NPipelineRequest):
    """
    Endpoint for executing a multi-step pipeline
    """
    import time
    start_time = time.time()

    try:
        from mcp import server

        pipeline_context = {"input": request.input_data}
        results = {}

        for step in request.steps:
            tool_name = step["tool"]
            output_key = step.get("output_key", tool_name)
            use_previous = step.get("use_previous", False)

            # Prepare the arguments
            if use_previous:
                # Use the result of the previous step
                arguments = {**request.input_data, **results}
            else:
                arguments = step.get("arguments", request.input_data)

            # Execute the step
            result = await server.call_tool(
                tool_name=tool_name,
                arguments=arguments
            )

            results[output_key] = result
            pipeline_context[output_key] = result

        execution_time = time.time() - start_time

        return N8NResponse(
            success=True,
            data={
                "pipeline": request.pipeline_name,
                "results": results,
                "context": pipeline_context
            },
            execution_time=execution_time
        )

    except Exception as e:
        logger.error(f"Pipeline execution failed: {str(e)}")
        execution_time = time.time() - start_time

        return N8NResponse(
            success=False,
            error=str(e),
            execution_time=execution_time
        )

@n8n_router.post("/voice-pipeline")
async def voice_debate_pipeline(
    audio: UploadFile = File(...),
    topic: str = None,
    session_id: str = None
):
    """
    Full pipeline: Audio → STT → Stance → KPA → Argument Generation → TTS
    Optimized for n8n
    """
    import time
    import tempfile
    import os
    start_time = time.time()

    try:
        from mcp import server
        from services.stt_service import transcribe_audio
        from services.tts_service import text_to_speech

        # 1. Save the audio to a temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
            content = await audio.read()
            tmp.write(content)
            tmp_path = tmp.name

        try:
            # 2. Speech-to-Text
            transcription = await transcribe_audio(tmp_path)
            user_text = transcription.get("text", "")

            # 3. Stance Detection
            stance_result = await server.call_tool(
                "predict_stance",
                {"topic": topic, "argument": user_text}
            )

            # 4. KPA Matching (optional)
            # kpa_result = await mcp_server.call_tool(...)

            # 5. Generate Counter-Argument
            opposite_stance = "CON" if stance_result["predicted_stance"] == "PRO" else "PRO"
            counter_arg_result = await server.call_tool(
                "generate_argument",
                {
                    "prompt": f"Generate a {opposite_stance} argument about {topic}",
                    "context": f"User said: {user_text}",
                    "stance": opposite_stance
                }
            )

            # 6. Text-to-Speech for the counter-argument
            tts_audio_path = await text_to_speech(
                counter_arg_result["generated_argument"]
            )

            execution_time = time.time() - start_time

            return N8NResponse(
                success=True,
                data={
                    "transcription": user_text,
                    "stance_analysis": stance_result,
                    "counter_argument": counter_arg_result,
                    "audio_response_path": tts_audio_path,
                    "session_id": session_id
                },
                execution_time=execution_time
            )

        finally:
            # Clean up the temporary file
            if os.path.exists(tmp_path):
                os.remove(tmp_path)

    except Exception as e:
        logger.error(f"Voice pipeline failed: {str(e)}")
        return N8NResponse(
            success=False,
            error=str(e),
            execution_time=time.time() - start_time
        )

@n8n_router.get("/tools")
async def list_tools():
    """
    List all available tools (n8n-friendly format)
    """
    try:
        from mcp import server
        tools = await server.list_tools()

        return {
            "success": True,
            "tools": tools,
            "total": len(tools)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@n8n_router.get("/resources")
async def list_resources():
    """
    List all available resources (n8n-friendly format)
    """
    try:
        from mcp import server
        resources = await server.list_resources()

        return {
            "success": True,
            "resources": resources,
            "total": len(resources)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@n8n_router.get("/health")
async def health_check():
    """
    Health check for n8n monitoring
    """
    from services.stance_model_manager import stance_model_manager
    from services.label_model_manager import kpa_model_manager

    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "models": {
            "stance": stance_model_manager.model_loaded if stance_model_manager else False,
            "kpa": kpa_model_manager.model_loaded if kpa_model_manager else False
        },
        "services": {
            "stt": True,  # Check whether GROQ_API_KEY exists
            "tts": True,
            "chat": True
        }
    }

# ==================== WEBHOOKS ====================

@n8n_router.post("/webhook/debate-result")
async def webhook_debate_result(data: Dict[str, Any], background_tasks: BackgroundTasks):
    """
    Webhook for receiving debate results from n8n
    Can be used for storage, notifications, etc.
    """
    logger.info(f"Received debate result webhook: {data}")

    # Process in the background
    background_tasks.add_task(process_debate_result, data)

    return {"status": "received", "message": "Processing in background"}

async def process_debate_result(data: Dict[str, Any]):
    """
    Process debate results in the background
    """
    # TODO: Implement your logic
    # - Save to DB
    # - Send notifications
    # - Update metrics
    logger.info(f"Processing debate result: {data}")

# ==================== EXPORT ====================

def register_n8n_routes(app):
    """
    Register the n8n routes on the FastAPI application
    """
    app.include_router(n8n_router)
    logger.info("n8n routes registered successfully")
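The module docstring above says these routes were meant to be mounted from the main app.py. A minimal sketch of that wiring, assuming an app.py that defines the FastAPI application (the actual app.py is not part of this commit):

# Hypothetical app.py wiring for the deleted module above.
from fastapi import FastAPI
from mcp.n8n_routes import register_n8n_routes

app = FastAPI()
register_n8n_routes(app)  # mounts /n8n/execute, /n8n/batch, /n8n/pipeline, etc.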
mcp/resources.py
DELETED
@@ -1,350 +0,0 @@
from typing import List, Dict, Any
from mcp import Resource
from datetime import datetime
import logging

logger = logging.getLogger(__name__)

class ModelResource(Resource):
    """Resource representing a loaded model"""
    def __init__(self, model_name: str, model_info: Dict[str, Any]):
        self.model_name = model_name
        self.model_info = model_info
        super().__init__(
            uri=f"model://{model_name}",
            name=model_name,
            description=f"{model_name} model information and status",
            mime_type="application/json"
        )

    async def get_content(self) -> str:
        """Get model information as JSON"""
        import json
        return json.dumps({
            **self.model_info,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })

class StanceDetectionResource(ModelResource):
    """Resource for stance detection model"""
    def __init__(self):
        from services.stance_model_manager import stance_model_manager

        model_info = {
            "type": "stance_detection",
            "description": "Detects PRO/CON stance for topic-argument pairs",
            "capabilities": ["single_prediction", "batch_prediction"],
            "input_format": {"topic": "string", "argument": "string"},
            "output_format": {
                "predicted_stance": "PRO/CON",
                "confidence": "float",
                "probabilities": {"PRO": "float", "CON": "float"}
            }
        }

        if stance_model_manager and stance_model_manager.model_loaded:
            model_info.update({
                "loaded": True,
                "device": str(stance_model_manager.device),
                "model_id": getattr(stance_model_manager, 'model_id', 'unknown')
            })
        else:
            model_info["loaded"] = False

        super().__init__("stance_detection", model_info)

class KPAResource(ModelResource):
    """Resource for Keypoint-Argument matching model"""
    def __init__(self):
        from services.label_model_manager import kpa_model_manager

        model_info = {
            "type": "keypoint_argument_matching",
            "description": "Matches arguments with key points (apparie/non_apparie)",
            "capabilities": ["single_prediction", "batch_prediction"],
            "input_format": {"argument": "string", "key_point": "string"},
            "output_format": {
                "prediction": "0/1",
                "label": "apparie/non_apparie",
                "confidence": "float",
                "probabilities": {"non_apparie": "float", "apparie": "float"}
            }
        }

        if kpa_model_manager and kpa_model_manager.model_loaded:
            model_info.update({
                "loaded": True,
                "device": str(kpa_model_manager.device),
                "model_id": getattr(kpa_model_manager, 'model_id', 'unknown'),
                "max_length": getattr(kpa_model_manager, 'max_length', 256)
            })
        else:
            model_info["loaded"] = False

        super().__init__("kpa_matching", model_info)

class STTResource(Resource):
    """Resource for Speech-to-Text capabilities"""
    def __init__(self):
        from config import GROQ_API_KEY, GROQ_STT_MODEL

        super().__init__(
            uri="service://speech-to-text",
            name="speech_to_text",
            description="Speech-to-Text service using Groq Whisper API",
            mime_type="application/json"
        )

        self.config = {
            "provider": "Groq",
            "model": GROQ_STT_MODEL,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "max_audio_size": "10MB",
            "supported_formats": ["wav", "mp3", "m4a", "mp4"]
        }

    async def get_content(self) -> str:
        """Get STT service information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })

class TTSResource(Resource):
    """Resource for Text-to-Speech capabilities"""
    def __init__(self):
        from config import GROQ_API_KEY, GROQ_TTS_MODEL, GROQ_TTS_VOICE

        super().__init__(
            uri="service://text-to-speech",
            name="text_to_speech",
            description="Text-to-Speech service using Groq PlayAI TTS",
            mime_type="application/json"
        )

        self.config = {
            "provider": "Groq",
            "model": GROQ_TTS_MODEL,
            "voice": GROQ_TTS_VOICE,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "format": "wav/mp3",
            "voices_available": ["Aaliyah-PlayAI", "Aria-PlayAI", "Dexter-PlayAI", "Fiona-PlayAI"]
        }

    async def get_content(self) -> str:
        """Get TTS service information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })

class ChatbotResource(Resource):
    """Resource for Chatbot capabilities"""
    def __init__(self):
        from config import GROQ_API_KEY, GROQ_CHAT_MODEL

        super().__init__(
            uri="service://chatbot",
            name="chatbot",
            description="Chatbot service using Groq LLM API",
            mime_type="application/json"
        )

        self.config = {
            "provider": "Groq",
            "model": GROQ_CHAT_MODEL,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "features": ["conversation", "context_awareness", "voice_chat"],
            "max_context_length": 8192
        }

    async def get_content(self) -> str:
        """Get chatbot service information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })

class ArgumentGenerationResource(Resource):
    """Resource for Argument Generation model (to be completed)"""
    def __init__(self):
        super().__init__(
            uri="model://argument-generation",
            name="argument_generation",
            description="Persuasive argument generation model",
            mime_type="application/json"
        )

        self.config = {
            "type": "argument_generation",
            "status": "not_implemented",
            "description": "TODO: Implement your argument generation model",
            "planned_capabilities": [
                "single_argument_generation",
                "batch_generation",
                "stance_controlled_generation",
                "counter_argument_generation"
            ]
        }

    async def get_content(self) -> str:
        """Get argument generation model information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri,
            "note": "This is a placeholder. Implement your model in services/argument_generation.py"
        })

class SystemHealthResource(Resource):
    """Resource for system health and status"""
    def __init__(self):
        super().__init__(
            uri="system://health",
            name="system_health",
            description="System health and service status",
            mime_type="application/json"
        )

    async def get_content(self) -> str:
        """Get system health information"""
        import json
        from datetime import datetime

        # Collect model status
        model_status = {}
        try:
            from services.stance_model_manager import stance_model_manager
            model_status["stance_detection"] = {
                "loaded": stance_model_manager.model_loaded if stance_model_manager else False
            }
        except:
            model_status["stance_detection"] = {"loaded": False}

        try:
            from services.label_model_manager import kpa_model_manager
            model_status["kpa_matching"] = {
                "loaded": kpa_model_manager.model_loaded if kpa_model_manager else False
            }
        except:
            model_status["kpa_matching"] = {"loaded": False}

        # Service status
        from config import GROQ_API_KEY
        service_status = {
            "stt": bool(GROQ_API_KEY),
            "tts": bool(GROQ_API_KEY),
            "chatbot": bool(GROQ_API_KEY),
            "argument_generation": False  # To be implemented
        }

        return json.dumps({
            "timestamp": datetime.now().isoformat(),
            "status": "operational",
            "models": model_status,
            "services": service_status,
            "api_version": "1.0.0",
            "mcp_version": "1.0.0"
        })

class APIDocumentationResource(Resource):
    """Resource for API documentation"""
    def __init__(self):
        super().__init__(
            uri="documentation://api",
            name="api_documentation",
            description="API endpoints documentation",
            mime_type="application/json"
        )

        self.documentation = {
            "endpoints": {
                "mcp": {
                    "/mcp/health": "GET - Health check",
                    "/mcp/resources": "GET - List all resources",
                    "/mcp/tools": "GET - List all tools",
                    "/mcp/tools/call": "POST - Call a tool"
                },
                "models": {
                    "/api/v1/kpa/predict": "POST - KPA prediction",
                    "/api/v1/stance/predict": "POST - Stance prediction",
                    "/api/v1/stance/batch-predict": "POST - Batch stance prediction"
                },
                "voice": {
                    "/api/v1/stt/": "POST - Speech to text",
                    "/api/v1/tts/": "POST - Text to speech",
                    "/voice-chat/voice": "POST - Voice chat",
                    "/voice-chat/text": "POST - Text chat"
                }
            },
            "authentication": "Currently none (add JWT or API key based auth)",
            "rate_limits": "None configured",
            "version": "2.0.0"
        }

    async def get_content(self) -> str:
        """Get API documentation"""
        import json
        return json.dumps({
            **self.documentation,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })

def get_resources() -> List[Resource]:
    """Return all available MCP resources"""
    resources = []

    try:
        resources.append(StanceDetectionResource())
    except Exception as e:
        logger.warning(f"Failed to create StanceDetectionResource: {e}")

    try:
        resources.append(KPAResource())
    except Exception as e:
        logger.warning(f"Failed to create KPAResource: {e}")

    try:
        resources.append(STTResource())
    except Exception as e:
        logger.warning(f"Failed to create STTResource: {e}")

    try:
        resources.append(TTSResource())
    except Exception as e:
        logger.warning(f"Failed to create TTSResource: {e}")

    try:
        resources.append(ChatbotResource())
    except Exception as e:
        logger.warning(f"Failed to create ChatbotResource: {e}")

    try:
        resources.append(ArgumentGenerationResource())
    except Exception as e:
        logger.warning(f"Failed to create ArgumentGenerationResource: {e}")

    try:
        resources.append(SystemHealthResource())
    except Exception as e:
        logger.warning(f"Failed to create SystemHealthResource: {e}")

    try:
        resources.append(APIDocumentationResource())
    except Exception as e:
        logger.warning(f"Failed to create APIDocumentationResource: {e}")

    logger.info(f"Created {len(resources)} MCP resources")
    return resources
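Each resource exposes its state through the async get_content() method, so a quick way to exercise the registry above would have been to dump every resource's JSON. A sketch, assuming the deleted module were still importable:

# Hypothetical smoke test for get_resources().
import asyncio
from mcp.resources import get_resources

async def dump_resources():
    for res in get_resources():
        print(res.uri, await res.get_content())

asyncio.run(dump_resources())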
mcp/run_mcp.py
DELETED
@@ -1,4 +0,0 @@
from server import run_mcp

if __name__ == "__main__":
    run_mcp()
mcp/server.py
DELETED
@@ -1,68 +0,0 @@
from typing import Any, List, Optional
import json
from fastapi import FastAPI, HTTPException
from mcp import Server, Resource, Tool
from mcp.types import TextContent, ImageContent
import logging
from .resources import get_resources


logger = logging.getLogger(__name__)

class MCPServer:
    def __init__(self, app: FastAPI):
        self.app = app
        self.server = Server()
        self._setup_resources()
        self._setup_tools()

    def _setup_resources(self):
        """Define the resources exposed via MCP"""

        resources = get_resources()
        for resource in resources:
            self.server.add_resource(resource)

    def _setup_tools(self):
        """Define the tools exposed via MCP"""
        from .tools import get_tools

        tools = get_tools()
        for tool in tools:
            self.server.add_tool(tool)

    async def list_resources(self) -> List[dict]:
        """List all available resources"""
        return [
            {
                "uri": resource.uri,
                "name": resource.name,
                "description": resource.description,
                "mime_type": resource.mime_type
            }
            for resource in self.server.resources.values()
        ]

    async def list_tools(self) -> List[dict]:
        """List all available tools"""
        return [
            {
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.input_schema
            }
            for tool in self.server.tools.values()
        ]

    async def call_tool(self, tool_name: str, arguments: dict) -> Any:
        """Call an MCP tool"""
        if tool_name not in self.server.tools:
            raise HTTPException(status_code=404, detail=f"Tool {tool_name} not found")

        try:
            tool = self.server.tools[tool_name]
            result = await tool.execute(arguments)
            return result
        except Exception as e:
            logger.error(f"Tool execution error: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Tool execution failed: {str(e)}")
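MCPServer wraps an mcp Server instance and routes tool calls through call_tool(); a sketch of driving it directly, assuming the class above plus its tools and resources modules were still in place:

# Hypothetical driver; the tool name and arguments match mcp/tools.py below.
import asyncio
from fastapi import FastAPI
from mcp.server import MCPServer

app = FastAPI()
mcp_server = MCPServer(app)
result = asyncio.run(mcp_server.call_tool(
    "predict_stance",
    {"topic": "climate change", "argument": "We need renewable energy"}
))
print(result)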
mcp/tools.py
DELETED
@@ -1,223 +0,0 @@
from typing import Dict, Any, List
from mcp import Tool
import logging
from services import (
    kpa_model_manager,
    stance_model_manager,
    chat_service
)

logger = logging.getLogger(__name__)

async def predict_kpa_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for keypoint-argument matching prediction"""
    try:
        argument = arguments.get("argument", "")
        key_point = arguments.get("key_point", "")

        if not argument or not key_point:
            return {"error": "Both argument and key_point are required"}

        result = kpa_model_manager.predict(argument, key_point)
        return {
            "prediction": result["prediction"],
            "label": result["label"],
            "confidence": result["confidence"],
            "probabilities": result["probabilities"]
        }
    except Exception as e:
        logger.error(f"KPA tool error: {str(e)}")
        return {"error": str(e)}

async def predict_stance_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for stance detection prediction"""
    try:
        topic = arguments.get("topic", "")
        argument = arguments.get("argument", "")

        if not topic or not argument:
            return {"error": "Both topic and argument are required"}

        result = stance_model_manager.predict(topic, argument)
        return {
            "predicted_stance": result["predicted_stance"],
            "confidence": result["confidence"],
            "probability_con": result["probability_con"],
            "probability_pro": result["probability_pro"]
        }
    except Exception as e:
        logger.error(f"Stance tool error: {str(e)}")
        return {"error": str(e)}

async def batch_stance_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for batch stance detection"""
    try:
        items = arguments.get("items", [])

        if not items:
            return {"error": "Items list is required"}

        results = []
        for item in items:
            result = stance_model_manager.predict(item["topic"], item["argument"])
            results.append({
                "topic": item["topic"],
                "argument": item["argument"],
                **result
            })

        return {
            "results": results,
            "total_processed": len(results)
        }
    except Exception as e:
        logger.error(f"Batch stance tool error: {str(e)}")
        return {"error": str(e)}

async def generate_argument_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for argument generation (to be completed with your model)"""
    try:
        prompt = arguments.get("prompt", "")
        context = arguments.get("context", "")

        if not prompt:
            return {"error": "Prompt is required"}

        # TODO: Integrate your argument generation model here
        # Placeholder for now
        from services.chat_service import generate_chat_response

        response = generate_chat_response(
            user_input=f"Generate argument for: {prompt}. Context: {context}",
            system_prompt="You are an argument generation assistant. Generate persuasive arguments based on the given prompt and context."
        )

        return {
            "generated_argument": response,
            "prompt": prompt,
            "context": context
        }
    except Exception as e:
        logger.error(f"Argument generation tool error: {str(e)}")
        return {"error": str(e)}

async def voice_chat_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for voice chat interaction"""
    try:
        text = arguments.get("text", "")
        conversation_id = arguments.get("conversation_id", "")

        if not text:
            return {"error": "Text input is required"}

        # Use the existing chat service
        from services.chat_service import generate_chat_response

        response = generate_chat_response(
            user_input=text,
            conversation_id=conversation_id if conversation_id else None
        )

        # Optional: add TTS if needed
        tts_required = arguments.get("tts", False)
        audio_url = None

        if tts_required:
            from services.tts_service import text_to_speech
            # TODO: Handle audio storage and the audio URL

        return {
            "response": response,
            "conversation_id": conversation_id,
            "has_audio": tts_required,
            "audio_url": audio_url
        }
    except Exception as e:
        logger.error(f"Voice chat tool error: {str(e)}")
        return {"error": str(e)}

def get_tools() -> List[Tool]:
    """Return all available tools"""
    return [
        Tool(
            name="predict_kpa",
            description="Predict keypoint-argument matching for a single pair",
            input_schema={
                "type": "object",
                "properties": {
                    "argument": {"type": "string", "description": "The argument text"},
                    "key_point": {"type": "string", "description": "The key point to evaluate"}
                },
                "required": ["argument", "key_point"]
            },
            execute=predict_kpa_tool
        ),
        Tool(
            name="predict_stance",
            description="Predict stance for a topic-argument pair",
            input_schema={
                "type": "object",
                "properties": {
                    "topic": {"type": "string", "description": "The debate topic"},
                    "argument": {"type": "string", "description": "The argument to classify"}
                },
                "required": ["topic", "argument"]
            },
            execute=predict_stance_tool
        ),
        Tool(
            name="batch_predict_stance",
            description="Predict stance for multiple topic-argument pairs",
            input_schema={
                "type": "object",
                "properties": {
                    "items": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "topic": {"type": "string"},
                                "argument": {"type": "string"}
                            },
                            "required": ["topic", "argument"]
                        },
                        "description": "List of topic-argument pairs"
                    }
                },
                "required": ["items"]
            },
            execute=batch_stance_tool
        ),
        Tool(
            name="generate_argument",
            description="Generate persuasive arguments based on prompt and context",
            input_schema={
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "Main topic or question"},
                    "context": {"type": "string", "description": "Additional context"},
                    "stance": {
                        "type": "string",
                        "enum": ["pro", "con", "neutral"],
                        "description": "Desired stance"
                    }
                },
                "required": ["prompt"]
            },
            execute=generate_argument_tool
        ),
        Tool(
            name="voice_chat",
            description="Chat with voice assistant capabilities",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text input"},
                    "conversation_id": {"type": "string", "description": "Conversation ID for context"},
                    "tts": {"type": "boolean", "description": "Generate audio response"}
                },
                "required": ["text"]
            },
            execute=voice_chat_tool
        )
    ]
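Since each tool is just an async callable taking a plain dict, it could also have been exercised without going through the server at all. A sketch, assuming the deleted module and its model managers were importable:

# Hypothetical direct invocation of one tool coroutine from mcp/tools.py.
import asyncio
from mcp.tools import predict_stance_tool

out = asyncio.run(predict_stance_tool(
    {"topic": "AI", "argument": "AI will help humanity"}
))
print(out)  # either a prediction dict or {"error": ...}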
mcp/types.py
DELETED
@@ -1,353 +0,0 @@
"""
Type definitions for MCP (Model Context Protocol)
"""
from typing import Dict, Any, List, Optional, Union, TypedDict
from enum import Enum
from datetime import datetime
from pydantic import BaseModel, Field

# ==================== ENUMS ====================

class ModelType(str, Enum):
    """Types of models available"""
    STANCE_DETECTION = "stance_detection"
    KPA_MATCHING = "kpa_matching"
    ARGUMENT_GENERATION = "argument_generation"
    CHATBOT = "chatbot"

class StanceType(str, Enum):
    """Stance types"""
    PRO = "PRO"
    CON = "CON"
    NEUTRAL = "NEUTRAL"

class KpaLabel(str, Enum):
    """KPA matching labels"""
    APPARIE = "apparie"
    NON_APPARIE = "non_apparie"

class ServiceStatus(str, Enum):
    """Service status"""
    OPERATIONAL = "operational"
    DEGRADED = "degraded"
    MAINTENANCE = "maintenance"
    OFFLINE = "offline"

class ToolCategory(str, Enum):
    """Tool categories"""
    PREDICTION = "prediction"
    GENERATION = "generation"
    TRANSFORMATION = "transformation"
    ANALYSIS = "analysis"
    UTILITY = "utility"

# ==================== CORE TYPES ====================

class ResourceMetadata(TypedDict):
    """Metadata for a resource"""
    uri: str
    name: str
    description: Optional[str]
    mime_type: str
    created_at: datetime
    updated_at: datetime
    tags: List[str]

class ToolMetadata(TypedDict):
    """Metadata for a tool"""
    name: str
    description: str
    version: str
    category: ToolCategory
    input_schema: Dict[str, Any]
    output_schema: Dict[str, Any]
    rate_limit: Optional[int]
    requires_auth: bool

class ModelMetadata(TypedDict):
    """Metadata for a model"""
    model_id: str
    model_type: ModelType
    provider: str
    version: str
    description: str
    capabilities: List[str]
    parameters: Dict[str, Any]
    hardware_requirements: Dict[str, Any]

# ==================== PREDICTION TYPES ====================

class PredictionInput(BaseModel):
    """Base class for prediction inputs"""
    model_id: Optional[str] = Field(None, description="Specific model to use")

class StancePredictionInput(PredictionInput):
    """Input for stance prediction"""
    topic: str = Field(..., min_length=5, max_length=500, description="Debate topic")
    argument: str = Field(..., min_length=5, max_length=1000, description="Argument text")

    class Config:
        json_schema_extra = {
            "example": {
                "topic": "Climate change is the most pressing issue of our time",
                "argument": "Renewable energy investments have created millions of jobs worldwide"
            }
        }

class KPAPredictionInput(PredictionInput):
    """Input for KPA prediction"""
    argument: str = Field(..., description="Argument text")
    key_point: str = Field(..., description="Key point to match")

    class Config:
        json_schema_extra = {
            "example": {
                "argument": "Renewable energy is cost-effective in the long term",
                "key_point": "Economic benefits of green energy"
            }
        }

class BatchPredictionInput(BaseModel):
    """Input for batch predictions"""
    items: List[Union[StancePredictionInput, KPAPredictionInput]]
    batch_size: Optional[int] = Field(10, ge=1, le=100)
    parallel: bool = Field(False, description="Process in parallel")

# ==================== GENERATION TYPES ====================

class ArgumentGenerationInput(BaseModel):
    """Input for argument generation"""
    prompt: str = Field(..., description="Main topic or question")
    context: Optional[str] = Field(None, description="Additional context")
    stance: Optional[StanceType] = Field(StanceType.NEUTRAL, description="Desired stance")
    length: Optional[str] = Field("medium", description="Argument length: short/medium/long")
    style: Optional[str] = Field("persuasive", description="Writing style")
    num_arguments: Optional[int] = Field(1, ge=1, le=5, description="Number of arguments to generate")

    class Config:
        json_schema_extra = {
            "example": {
                "prompt": "Should artificial intelligence be regulated?",
                "stance": "PRO",
                "context": "Focus on ethical considerations",
                "length": "medium"
            }
        }

class CounterArgumentInput(BaseModel):
    """Input for counter-argument generation"""
    original_argument: str = Field(..., description="Original argument to counter")
    target_stance: StanceType = Field(..., description="Stance for counter-argument")
    context: Optional[str] = Field(None, description="Additional context")

    class Config:
        json_schema_extra = {
            "example": {
                "original_argument": "AI regulation stifles innovation",
                "target_stance": "CON",
                "context": "Focus on safety and ethics"
            }
        }

# ==================== VOICE TYPES ====================

class AudioFormat(str, Enum):
    """Supported audio formats"""
    WAV = "wav"
    MP3 = "mp3"
    M4A = "m4a"
    OGG = "ogg"

class VoiceProfile(str, Enum):
    """Available voice profiles"""
    ALIYAH = "Aaliyah-PlayAI"
    ARIA = "Aria-PlayAI"
    DEXTER = "Dexter-PlayAI"
    FIONA = "Fiona-PlayAI"

class STTInput(BaseModel):
    """Input for speech-to-text"""
    audio_format: AudioFormat = Field(AudioFormat.WAV, description="Audio format")
    language: str = Field("en", description="Language code (en, fr, etc.)")
    enable_timestamps: bool = Field(False, description="Include word timestamps")

    class Config:
        json_schema_extra = {
            "example": {
                "audio_format": "wav",
                "language": "en",
                "enable_timestamps": False
            }
        }

class TTSInput(BaseModel):
    """Input for text-to-speech"""
    text: str = Field(..., description="Text to convert to speech")
    voice: VoiceProfile = Field(VoiceProfile.ALIYAH, description="Voice to use")
    format: AudioFormat = Field(AudioFormat.WAV, description="Output format")
    speed: float = Field(1.0, ge=0.5, le=2.0, description="Speech speed")
    pitch: float = Field(1.0, ge=0.5, le=2.0, description="Voice pitch")

    class Config:
        json_schema_extra = {
            "example": {
                "text": "Hello, this is a test of text-to-speech.",
                "voice": "Aaliyah-PlayAI",
                "format": "wav",
                "speed": 1.0,
                "pitch": 1.0
            }
        }

# ==================== RESPONSE TYPES ====================

class PredictionResult(BaseModel):
    """Base prediction result"""
    prediction: Union[int, str]
    confidence: float = Field(..., ge=0.0, le=1.0)
    processing_time: Optional[float] = Field(None, description="Processing time in seconds")

class StancePredictionResult(PredictionResult):
    """Stance prediction result"""
    predicted_stance: StanceType
    probability_pro: float = Field(..., ge=0.0, le=1.0)
    probability_con: float = Field(..., ge=0.0, le=1.0)
    topic: str
    argument: str

class KPAPredictionResult(PredictionResult):
    """KPA prediction result"""
    label: KpaLabel
    probabilities: Dict[KpaLabel, float]
    argument: str
    key_point: str

class GenerationResult(BaseModel):
    """Base generation result"""
    generated_text: str
    prompt: str
    context: Optional[str]
    parameters: Dict[str, Any]
    generation_time: Optional[float]

class ArgumentGenerationResult(GenerationResult):
    """Argument generation result"""
    stance: StanceType
    length: str
    style: str
    coherence_score: Optional[float] = Field(None, ge=0.0, le=1.0)

class BatchResult(BaseModel):
    """Batch processing result"""
    results: List[Union[StancePredictionResult, KPAPredictionResult, ArgumentGenerationResult]]
    total_processed: int
    successful: int
    failed: int
    average_confidence: Optional[float]
    total_time: float

class ErrorResponse(BaseModel):
    """Error response"""
    error: str
    code: Optional[str]
    details: Optional[Dict[str, Any]]
    timestamp: datetime = Field(default_factory=datetime.now)

class HealthResponse(BaseModel):
    """Health check response"""
    status: ServiceStatus
    version: str
    uptime: float
    models: Dict[str, bool]
    services: Dict[str, bool]
    timestamp: datetime = Field(default_factory=datetime.now)

# ==================== TOOL EXECUTION TYPES ====================

class ToolExecutionContext(BaseModel):
    """Context for tool execution"""
    tool_id: str
    user_id: Optional[str]
    session_id: Optional[str]
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]]

class ToolExecutionResult(BaseModel):
    """Result of tool execution"""
    success: bool
    output: Optional[Dict[str, Any]]
    error: Optional[str]
    execution_time: float
    context: ToolExecutionContext

# ==================== CONVERSATION TYPES ====================

class MessageRole(str, Enum):
    """Roles in conversation"""
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"

class ConversationMessage(BaseModel):
    """Single message in conversation"""
    role: MessageRole
    content: str
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]]

class ConversationState(BaseModel):
    """Conversation state"""
    conversation_id: str
    messages: List[ConversationMessage]
    created_at: datetime
    updated_at: datetime = Field(default_factory=datetime.now)
    metadata: Dict[str, Any] = Field(default_factory=dict)

# ==================== EXPORT ====================

__all__ = [
    # Enums
    "ModelType",
    "StanceType",
    "KpaLabel",
    "ServiceStatus",
    "ToolCategory",
    "AudioFormat",
    "VoiceProfile",
    "MessageRole",

    # Input Types
    "PredictionInput",
    "StancePredictionInput",
    "KPAPredictionInput",
    "BatchPredictionInput",
    "ArgumentGenerationInput",
    "CounterArgumentInput",
    "STTInput",
    "TTSInput",

    # Result Types
    "PredictionResult",
    "StancePredictionResult",
    "KPAPredictionResult",
    "GenerationResult",
    "ArgumentGenerationResult",
    "BatchResult",

    # Response Types
    "ErrorResponse",
    "HealthResponse",

    # Tool Types
    "ToolExecutionContext",
    "ToolExecutionResult",

    # Conversation Types
    "ConversationMessage",
    "ConversationState",

    # TypedDicts (for compatibility)
    "ResourceMetadata",
    "ToolMetadata",
    "ModelMetadata"
]
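These pydantic models enforce their Field constraints at construction time, so the min_length bounds on StancePredictionInput reject short inputs immediately. A sketch of that behavior, assuming pydantic v2 (the json_schema_extra key used throughout suggests v2):

# Hypothetical validation check against the deleted types above.
from pydantic import ValidationError
from mcp.types import StancePredictionInput

ok = StancePredictionInput(topic="climate change", argument="We need renewable energy")
print(ok.model_dump())

try:
    StancePredictionInput(topic="AI", argument="hi")  # both fields violate min_length=5
except ValidationError as e:
    print(e.error_count(), "validation errors")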