""" Type definitions for MCP (Model Context Protocol) """ from typing import Dict, Any, List, Optional, Union, TypedDict from enum import Enum from datetime import datetime from pydantic import BaseModel, Field # ==================== ENUMS ==================== class ModelType(str, Enum): """Types of models available""" STANCE_DETECTION = "stance_detection" KPA_MATCHING = "kpa_matching" ARGUMENT_GENERATION = "argument_generation" CHATBOT = "chatbot" class StanceType(str, Enum): """Stance types""" PRO = "PRO" CON = "CON" NEUTRAL = "NEUTRAL" class KpaLabel(str, Enum): """KPA matching labels""" APPARIE = "apparie" NON_APPARIE = "non_apparie" class ServiceStatus(str, Enum): """Service status""" OPERATIONAL = "operational" DEGRADED = "degraded" MAINTENANCE = "maintenance" OFFLINE = "offline" class ToolCategory(str, Enum): """Tool categories""" PREDICTION = "prediction" GENERATION = "generation" TRANSFORMATION = "transformation" ANALYSIS = "analysis" UTILITY = "utility" # ==================== CORE TYPES ==================== class ResourceMetadata(TypedDict): """Metadata for a resource""" uri: str name: str description: Optional[str] mime_type: str created_at: datetime updated_at: datetime tags: List[str] class ToolMetadata(TypedDict): """Metadata for a tool""" name: str description: str version: str category: ToolCategory input_schema: Dict[str, Any] output_schema: Dict[str, Any] rate_limit: Optional[int] requires_auth: bool class ModelMetadata(TypedDict): """Metadata for a model""" model_id: str model_type: ModelType provider: str version: str description: str capabilities: List[str] parameters: Dict[str, Any] hardware_requirements: Dict[str, Any] # ==================== PREDICTION TYPES ==================== class PredictionInput(BaseModel): """Base class for prediction inputs""" model_id: Optional[str] = Field(None, description="Specific model to use") class StancePredictionInput(PredictionInput): """Input for stance prediction""" topic: str = Field(..., min_length=5, max_length=500, description="Debate topic") argument: str = Field(..., min_length=5, max_length=1000, description="Argument text") class Config: json_schema_extra = { "example": { "topic": "Climate change is the most pressing issue of our time", "argument": "Renewable energy investments have created millions of jobs worldwide" } } class KPAPredictionInput(PredictionInput): """Input for KPA prediction""" argument: str = Field(..., description="Argument text") key_point: str = Field(..., description="Key point to match") class Config: json_schema_extra = { "example": { "argument": "Renewable energy is cost-effective in the long term", "key_point": "Economic benefits of green energy" } } class BatchPredictionInput(BaseModel): """Input for batch predictions""" items: List[Union[StancePredictionInput, KPAPredictionInput]] batch_size: Optional[int] = Field(10, ge=1, le=100) parallel: bool = Field(False, description="Process in parallel") # ==================== GENERATION TYPES ==================== class ArgumentGenerationInput(BaseModel): """Input for argument generation""" prompt: str = Field(..., description="Main topic or question") context: Optional[str] = Field(None, description="Additional context") stance: Optional[StanceType] = Field(StanceType.NEUTRAL, description="Desired stance") length: Optional[str] = Field("medium", description="Argument length: short/medium/long") style: Optional[str] = Field("persuasive", description="Writing style") num_arguments: Optional[int] = Field(1, ge=1, le=5, description="Number of arguments to 
generate") class Config: json_schema_extra = { "example": { "prompt": "Should artificial intelligence be regulated?", "stance": "PRO", "context": "Focus on ethical considerations", "length": "medium" } } class CounterArgumentInput(BaseModel): """Input for counter-argument generation""" original_argument: str = Field(..., description="Original argument to counter") target_stance: StanceType = Field(..., description="Stance for counter-argument") context: Optional[str] = Field(None, description="Additional context") class Config: json_schema_extra = { "example": { "original_argument": "AI regulation stifles innovation", "target_stance": "CON", "context": "Focus on safety and ethics" } } # ==================== VOICE TYPES ==================== class AudioFormat(str, Enum): """Supported audio formats""" WAV = "wav" MP3 = "mp3" M4A = "m4a" OGG = "ogg" class VoiceProfile(str, Enum): """Available voice profiles""" ALIYAH = "Aaliyah-PlayAI" ARIA = "Aria-PlayAI" DEXTER = "Dexter-PlayAI" FIONA = "Fiona-PlayAI" class STTInput(BaseModel): """Input for speech-to-text""" audio_format: AudioFormat = Field(AudioFormat.WAV, description="Audio format") language: str = Field("en", description="Language code (en, fr, etc.)") enable_timestamps: bool = Field(False, description="Include word timestamps") class Config: json_schema_extra = { "example": { "audio_format": "wav", "language": "en", "enable_timestamps": False } } class TTSInput(BaseModel): """Input for text-to-speech""" text: str = Field(..., description="Text to convert to speech") voice: VoiceProfile = Field(VoiceProfile.ALIYAH, description="Voice to use") format: AudioFormat = Field(AudioFormat.WAV, description="Output format") speed: float = Field(1.0, ge=0.5, le=2.0, description="Speech speed") pitch: float = Field(1.0, ge=0.5, le=2.0, description="Voice pitch") class Config: json_schema_extra = { "example": { "text": "Hello, this is a test of text-to-speech.", "voice": "Aaliyah-PlayAI", "format": "wav", "speed": 1.0, "pitch": 1.0 } } # ==================== RESPONSE TYPES ==================== class PredictionResult(BaseModel): """Base prediction result""" prediction: Union[int, str] confidence: float = Field(..., ge=0.0, le=1.0) processing_time: Optional[float] = Field(None, description="Processing time in seconds") class StancePredictionResult(PredictionResult): """Stance prediction result""" predicted_stance: StanceType probability_pro: float = Field(..., ge=0.0, le=1.0) probability_con: float = Field(..., ge=0.0, le=1.0) topic: str argument: str class KPAPredictionResult(PredictionResult): """KPA prediction result""" label: KpaLabel probabilities: Dict[KpaLabel, float] argument: str key_point: str class GenerationResult(BaseModel): """Base generation result""" generated_text: str prompt: str context: Optional[str] parameters: Dict[str, Any] generation_time: Optional[float] class ArgumentGenerationResult(GenerationResult): """Argument generation result""" stance: StanceType length: str style: str coherence_score: Optional[float] = Field(None, ge=0.0, le=1.0) class BatchResult(BaseModel): """Batch processing result""" results: List[Union[StancePredictionResult, KPAPredictionResult, ArgumentGenerationResult]] total_processed: int successful: int failed: int average_confidence: Optional[float] total_time: float class ErrorResponse(BaseModel): """Error response""" error: str code: Optional[str] details: Optional[Dict[str, Any]] timestamp: datetime = Field(default_factory=datetime.now) class HealthResponse(BaseModel): """Health check response""" 
    status: ServiceStatus
    version: str
    uptime: float
    models: Dict[str, bool]
    services: Dict[str, bool]
    timestamp: datetime = Field(default_factory=datetime.now)


# ==================== TOOL EXECUTION TYPES ====================

class ToolExecutionContext(BaseModel):
    """Context for tool execution"""
    tool_id: str
    user_id: Optional[str] = None
    session_id: Optional[str] = None
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]] = None


class ToolExecutionResult(BaseModel):
    """Result of tool execution"""
    success: bool
    output: Optional[Dict[str, Any]] = None
    error: Optional[str] = None
    execution_time: float
    context: ToolExecutionContext


# ==================== CONVERSATION TYPES ====================

class MessageRole(str, Enum):
    """Roles in conversation"""
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"


class ConversationMessage(BaseModel):
    """Single message in conversation"""
    role: MessageRole
    content: str
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]] = None


class ConversationState(BaseModel):
    """Conversation state"""
    conversation_id: str
    messages: List[ConversationMessage]
    created_at: datetime
    updated_at: datetime = Field(default_factory=datetime.now)
    metadata: Dict[str, Any] = Field(default_factory=dict)


# ==================== EXPORT ====================

__all__ = [
    # Enums
    "ModelType",
    "StanceType",
    "KpaLabel",
    "ServiceStatus",
    "ToolCategory",
    "AudioFormat",
    "VoiceProfile",
    "MessageRole",
    # Input Types
    "PredictionInput",
    "StancePredictionInput",
    "KPAPredictionInput",
    "BatchPredictionInput",
    "ArgumentGenerationInput",
    "CounterArgumentInput",
    "STTInput",
    "TTSInput",
    # Result Types
    "PredictionResult",
    "StancePredictionResult",
    "KPAPredictionResult",
    "GenerationResult",
    "ArgumentGenerationResult",
    "BatchResult",
    # Response Types
    "ErrorResponse",
    "HealthResponse",
    # Tool Types
    "ToolExecutionContext",
    "ToolExecutionResult",
    # Conversation Types
    "ConversationMessage",
    "ConversationState",
    # TypedDicts (for compatibility)
    "ResourceMetadata",
    "ToolMetadata",
    "ModelMetadata"
]
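

# A small demo sketch (assumes Pydantic v2, which the `json_schema_extra`
# config above implies; the values are invented examples). Running the module
# directly shows an input and a result model validating and serializing:
if __name__ == "__main__":
    demo_input = StancePredictionInput(
        topic="Should artificial intelligence be regulated?",
        argument="Clear rules would increase public trust in AI systems",
    )
    demo_result = StancePredictionResult(
        prediction="PRO",
        confidence=0.91,
        predicted_stance=StanceType.PRO,
        probability_pro=0.91,
        probability_con=0.09,
        topic=demo_input.topic,
        argument=demo_input.argument,
    )
    # `model_dump_json` is the Pydantic v2 API; under v1 this would be `.json()`.
    print(demo_result.model_dump_json(indent=2))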