"""Service pour initialiser le serveur MCP avec FastMCP"""
from mcp.server.fastmcp import FastMCP
from typing import Dict, Any
import logging
from fastapi import FastAPI
from services.stance_model_manager import stance_model_manager
from services.label_model_manager import kpa_model_manager
from services.stt_service import speech_to_text
from services.tts_service import text_to_speech
from services.generate_model_manager import generate_model_manager
logger = logging.getLogger(__name__)
# Créer l'instance FastMCP
mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False) # Stateful pour sessions
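
# With stateless_http=False, MCP clients must run the initialize handshake
# before calling tools (see the usage sketch at the bottom of this file).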


# Tools
@mcp_server.tool()
def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
    """Classify an argument's stance (pro/con) toward a topic."""
    if not stance_model_manager.model_loaded:
        raise ValueError("Stance model not loaded")
    result = stance_model_manager.predict(topic, argument)
    return {
        "predicted_stance": result["predicted_stance"],
        "confidence": result["confidence"],
        "probability_con": result["probability_con"],
        "probability_pro": result["probability_pro"],
    }
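
# Illustrative call (hypothetical values; the actual labels and probabilities
# depend on the model behind stance_model_manager):
#   detect_stance("nuclear energy", "It cuts carbon emissions drastically")
#   -> {"predicted_stance": "PRO", "confidence": 0.93,
#       "probability_con": 0.07, "probability_pro": 0.93}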


@mcp_server.tool()
def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
    """Predict whether an argument matches a given key point (KPA)."""
    if not kpa_model_manager.model_loaded:
        raise ValueError("KPA model not loaded")
    result = kpa_model_manager.predict(argument, key_point)
    return {
        "prediction": result["prediction"],
        "label": result["label"],
        "confidence": result["confidence"],
        "probabilities": result["probabilities"],
    }


@mcp_server.tool()
def transcribe_audio(audio_path: str) -> str:
    """Transcribe an audio file to text (speech-to-text)."""
    return speech_to_text(audio_path)


@mcp_server.tool()
def generate_speech(text: str, voice: str = "Aaliyah-PlayAI", format: str = "wav") -> str:
    """Synthesize speech from text (text-to-speech)."""
    return text_to_speech(text, voice, format)


@mcp_server.tool()
def generate_argument(topic: str, position: str) -> Dict[str, Any]:
    """Generate an argument for a given topic and position."""
    if not generate_model_manager.model_loaded:
        raise ValueError("Generation model not loaded")
    argument = generate_model_manager.generate(topic=topic, position=position)
    return {
        "topic": topic,
        "position": position,
        "argument": argument,
    }
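
# Illustrative call (hypothetical output; the wording comes from the model
# behind generate_model_manager):
#   generate_argument("school uniforms", "PRO")
#   -> {"topic": "school uniforms", "position": "PRO",
#       "argument": "Uniforms reduce peer pressure and..."}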


@mcp_server.resource("debate://prompt")
def get_debate_prompt() -> str:
    """Return the default debate prompt exposed as an MCP resource."""
    return "You are a debate expert. Generate 3 PRO arguments for the given topic. Be concise and persuasive."


# Health tool (registered before initialization)
@mcp_server.tool()
def health_check() -> Dict[str, Any]:
    """Health check for the MCP server."""
    # Hardcoded list to avoid issues with list_tools(); keep it in sync with
    # the tools registered above.
    tool_names = [
        "detect_stance",
        "match_keypoint_argument",
        "transcribe_audio",
        "generate_speech",
        "generate_argument",
        "health_check",
    ]
    return {"status": "healthy", "tools": tool_names}


def init_mcp_server(app: FastAPI) -> None:
    """Initialize the MCP server and mount it on the FastAPI app."""
    # streamable_http_app() returns the ASGI app serving the MCP
    # streamable-HTTP endpoint.
    mcp_app = mcp_server.streamable_http_app()
    # Mount at /api/v1/mcp. Note that Starlette does not run a mounted app's
    # lifespan automatically, so the parent app should adopt it (e.g. via
    # FastAPI(lifespan=...) wired to the MCP session manager).
    app.mount("/api/v1/mcp", mcp_app)
    logger.info("✓ MCP server mounted at /api/v1/mcp with NLP/STT/TTS tools")
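

# --- Usage sketch (illustrative only) --------------------------------------
# A minimal sketch of how this module might be wired up and called; the app
# module, host/port, endpoint path, and argument values are assumptions, not
# part of this service. The streamable-HTTP app is typically served under
# "<mount>/mcp", hence the /api/v1/mcp/mcp URL below.
#
#   # main.py (hypothetical)
#   from fastapi import FastAPI
#   from services.mcp_service import init_mcp_server, mcp_server
#
#   app = FastAPI(lifespan=lambda _app: mcp_server.session_manager.run())
#   init_mcp_server(app)
#
#   # Client side (hypothetical), using the official MCP Python SDK:
#   from mcp import ClientSession
#   from mcp.client.streamable_http import streamablehttp_client
#
#   async def demo() -> None:
#       url = "http://localhost:8000/api/v1/mcp/mcp"
#       async with streamablehttp_client(url) as (read, write, _):
#           async with ClientSession(read, write) as session:
#               await session.initialize()
#               result = await session.call_tool(
#                   "detect_stance",
#                   {"topic": "nuclear energy", "argument": "It cuts emissions."},
#               )
#               prompt = await session.read_resource("debate://prompt")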