"""Pydantic schemas for stance detection endpoints"""
from pydantic import BaseModel, Field, ConfigDict
from typing import List
class StanceRequest(BaseModel):
"""Request model for stance prediction"""
model_config = ConfigDict(
json_schema_extra={
"example": {
"topic": "Assisted suicide should be a criminal offence",
"argument": "People have the right to choose how they end their lives"
}
}
)
topic: str = Field(..., min_length=5, max_length=500,
description="The debate topic or statement")
argument: str = Field(..., min_length=5, max_length=1000,
description="The argument text to classify")
class StanceResponse(BaseModel):
"""Response model for stance prediction"""
model_config = ConfigDict(
json_schema_extra={
"example": {
"topic": "Assisted suicide should be a criminal offence",
"argument": "People have the right to choose how they end their lives",
"predicted_stance": "CON",
"confidence": 0.9234,
"probability_con": 0.9234,
"probability_pro": 0.0766,
"timestamp": "2024-11-15T10:30:00"
}
}
)
topic: str
argument: str
predicted_stance: str = Field(..., description="PRO or CON")
confidence: float = Field(..., ge=0.0, le=1.0)
probability_con: float
probability_pro: float
timestamp: str
class BatchStanceRequest(BaseModel):
"""Request model for batch predictions"""
items: List[StanceRequest] = Field(..., max_length=50,
description="List of topic-argument pairs (max 50)")
class BatchStanceResponse(BaseModel):
"""Response model for batch predictions"""
results: List[StanceResponse]
total_processed: int
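
A minimal usage sketch of these models, assuming Pydantic v2 (which the ConfigDict and json_schema_extra usage implies) and that the file above is importable as schemas (a hypothetical module name):

from pydantic import ValidationError

from schemas import StanceRequest, BatchStanceRequest  # assumed import path

# A well-formed request passes validation.
req = StanceRequest(
    topic="Assisted suicide should be a criminal offence",
    argument="People have the right to choose how they end their lives",
)
print(req.model_dump())

# Constraint violations raise ValidationError; here the argument is
# shorter than min_length=5.
try:
    StanceRequest(topic="Assisted suicide should be a criminal offence", argument="No")
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # e.g. "string_too_short" in Pydantic v2

# Batch requests are capped at 50 items via max_length on the list field.
batch = BatchStanceRequest(items=[req] * 3)
print(len(batch.items))  # 3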