# NOTE(review): the following lines are repository-viewer scrape residue
# (author, commit message, hash, UI labels, file size) — not Python source.
# Commented out so the module parses; safe to delete entirely.
# malek-messaoudii
# refactor: Remove GenerateArgumentRequest and GenerateArgumentResponse models; update generate_argument endpoint to accept topic and position parameters.
# c508ed0
"""Service pour initialiser le serveur MCP avec FastMCP"""
from mcp.server.fastmcp import FastMCP
from typing import Dict, Any
import logging
from fastapi import FastAPI
from services.stance_model_manager import stance_model_manager
from services.label_model_manager import kpa_model_manager
from services.stt_service import speech_to_text
from services.tts_service import text_to_speech
from services.generate_model_manager import generate_model_manager
# Module-level logger for this service.
logger = logging.getLogger(__name__)
# Create the FastMCP server instance.
# json_response=True: tool results are returned as JSON payloads.
# stateless_http=False: stateful mode so client sessions are preserved.
mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False) # Stateful for sessions
# Tool registrations (unchanged, OK)
@mcp_server.tool()
def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
    """Classify the stance of *argument* toward *topic*.

    Returns the predicted stance label together with its confidence and
    the per-class (con/pro) probabilities from the stance model.

    Raises:
        ValueError: if the stance model has not been loaded yet.
    """
    if not stance_model_manager.model_loaded:
        raise ValueError("Modèle stance non chargé")
    prediction = stance_model_manager.predict(topic, argument)
    # Project only the fields exposed by this tool's contract.
    fields = ("predicted_stance", "confidence", "probability_con", "probability_pro")
    return {field: prediction[field] for field in fields}
@mcp_server.tool()
def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
    """Decide whether *key_point* matches *argument* using the KPA model.

    Returns the raw prediction, a human-readable label, the confidence,
    and the full probability distribution.

    Raises:
        ValueError: if the KPA model has not been loaded yet.
    """
    if not kpa_model_manager.model_loaded:
        raise ValueError("Modèle KPA non chargé")
    outcome = kpa_model_manager.predict(argument, key_point)
    # Expose exactly the fields defined by this tool's contract.
    return {
        field: outcome[field]
        for field in ("prediction", "label", "confidence", "probabilities")
    }
@mcp_server.tool()
def transcribe_audio(audio_path: str) -> str:
    """Transcribe the audio file at *audio_path* via the STT service."""
    transcript = speech_to_text(audio_path)
    return transcript
@mcp_server.tool()
def generate_speech(text: str, voice: str = "Aaliyah-PlayAI", format: str = "wav") -> str:
    """Synthesize *text* to speech via the TTS service.

    NOTE(review): ``format`` shadows the builtin, but it is part of the
    public tool signature and therefore kept as-is.
    """
    audio = text_to_speech(text, voice, format)
    return audio
@mcp_server.tool()
def generate_argument(topic: str, position: str) -> Dict[str, Any]:
    """Generate an argument for a given topic and position.

    Raises:
        ValueError: if the generation model has not been loaded yet.
    """
    if not generate_model_manager.model_loaded:
        raise ValueError("Modèle de génération non chargé")
    generated = generate_model_manager.generate(topic=topic, position=position)
    # Echo the inputs back alongside the generated text.
    return {"topic": topic, "position": position, "argument": generated}
@mcp_server.resource("debate://prompt")
def get_debate_prompt() -> str:
    """Expose the system prompt used for debate argument generation."""
    prompt = "Tu es un expert en débat. Génère 3 arguments PRO pour le topic donné. Sois concis et persuasif."
    return prompt
# Health tool (registered before initialisation)
@mcp_server.tool()
def health_check() -> Dict[str, Any]:
    """Health check for the MCP server.

    Returns:
        A dict with a ``"status"`` of ``"healthy"`` and the ``"tools"``
        list of registered tool names.

    The tool list is hardcoded deliberately to avoid issues with
    FastMCP's ``list_tools()`` at this point in startup; keep it in sync
    with the ``@mcp_server.tool()`` registrations above.
    """
    # A literal list assignment cannot raise, so the previous
    # try/except Exception wrapper was dead code and has been removed.
    tool_names = [
        "detect_stance",
        "match_keypoint_argument",
        "transcribe_audio",
        "generate_speech",
        "generate_argument",
        "health_check",
    ]
    return {"status": "healthy", "tools": tool_names}
def init_mcp_server(app: FastAPI) -> None:
    """Initialise the MCP server and mount it onto the FastAPI app.

    ``streamable_http_app()`` yields the ASGI application (which serves
    /health, /tools, etc. natively); it is mounted at ``/api/v1/mcp``
    and FastAPI manages its lifespan automatically.
    """
    app.mount("/api/v1/mcp", mcp_server.streamable_http_app())
    logger.info("✓ Serveur MCP monté sur /api/v1/mcp avec tools NLP/STT/TTS")