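"""
Entry point for the NLP Debater API with voice chatbot support.

Configures logging, verifies/installs ffmpeg for audio processing, preloads
the configured models (stance, KPA, STT, chatbot) on startup, and exposes
health, model-status, and ffmpeg-check endpoints.
"""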
import sys
from pathlib import Path
import os
import subprocess
import logging
from contextlib import asynccontextmanager
# Configure logging first
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Add the app directory to Python path
app_dir = Path(__file__).parent
if str(app_dir) not in sys.path:
sys.path.insert(0, str(app_dir))
def install_ffmpeg():
"""Install ffmpeg on system (required for audio processing)"""
try:
result = subprocess.run(["which", "ffmpeg"], capture_output=True, text=True)
if result.returncode == 0:
version_result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
if version_result.returncode == 0:
version = version_result.stdout.split()[2]
logger.info(f"βœ“ ffmpeg already installed: {version}")
return True
logger.info("Installing ffmpeg...")
subprocess.run(["apt-get", "update"], check=True, capture_output=True)
subprocess.run(["apt-get", "install", "-y", "ffmpeg"], check=True, capture_output=True)
verify = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
if verify.returncode == 0:
version = verify.stdout.split()[2]
logger.info(f"βœ“ ffmpeg installed successfully: {version}")
return True
return False
except Exception as e:
logger.warning(f"⚠️ ffmpeg installation warning: {e}")
return False
# Install system dependencies first
logger.info("="*60)
logger.info("Checking system dependencies...")
logger.info("="*60)
install_ffmpeg()
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from config import (
API_TITLE, API_DESCRIPTION, API_VERSION,
HUGGINGFACE_API_KEY, HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_LABEL_MODEL_ID,
HOST, PORT, RELOAD,
CORS_ORIGINS, CORS_CREDENTIALS, CORS_METHODS, CORS_HEADERS,
PRELOAD_MODELS_ON_STARTUP, LOAD_STANCE_MODEL, LOAD_KPA_MODEL,
LOAD_STT_MODEL, LOAD_CHATBOT_MODEL, STT_MODEL_ID, CHATBOT_MODEL_ID
)
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Load models on startup and cleanup on shutdown"""
logger.info("="*60)
logger.info("πŸš€ STARTING API - Loading Models...")
logger.info("="*60)
if PRELOAD_MODELS_ON_STARTUP:
# Load Stance Detection Model
if LOAD_STANCE_MODEL:
try:
logger.info(f"Loading Stance Model: {HUGGINGFACE_STANCE_MODEL_ID}")
from services.stance_model_manager import load_model as load_stance
load_stance(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
logger.info("βœ“ Stance model loaded successfully")
except Exception as e:
logger.error(f"βœ— Stance model loading failed: {str(e)}")
# Load KPA/Label Model
if LOAD_KPA_MODEL:
try:
logger.info(f"Loading KPA Model: {HUGGINGFACE_LABEL_MODEL_ID}")
from services.label_model_manager import load_model as load_kpa
load_kpa(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
logger.info("βœ“ KPA model loaded successfully")
except Exception as e:
logger.error(f"βœ— KPA model loading failed: {str(e)}")
# Load STT Model (Speech-to-Text)
if LOAD_STT_MODEL:
try:
logger.info(f"Loading STT Model: {STT_MODEL_ID}")
from services.stt_service import STTService
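                # The instance is created only to trigger model loading; it is
                # assumed that STTService caches the loaded model (e.g. as a
                # singleton) so request handlers can reuse it.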
stt_service = STTService()
await stt_service.initialize()
logger.info("βœ“ STT model loaded successfully")
except Exception as e:
logger.error(f"βœ— STT model loading failed: {str(e)}")
# Load Chatbot Model
if LOAD_CHATBOT_MODEL:
try:
logger.info(f"Loading Chatbot Model: {CHATBOT_MODEL_ID}")
from services.chatbot_service import ChatbotService
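                # As with the STT service above, the instance is assumed to
                # cache the loaded model for reuse by the request handlers.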
chatbot_service = ChatbotService()
await chatbot_service.initialize()
logger.info("βœ“ Chatbot model loaded successfully")
except Exception as e:
logger.error(f"βœ— Chatbot model loading failed: {str(e)}")
logger.info("="*60)
logger.info("βœ“ API startup complete - Ready to serve requests")
logger.info(f"πŸ“š API Docs: http://{HOST}:{PORT}/docs")
logger.info("="*60)
yield # Application runs here
# Shutdown
logger.info("Shutting down API...")
# Create FastAPI application
app = FastAPI(
title=API_TITLE,
description=API_DESCRIPTION,
version=API_VERSION,
docs_url="/docs",
redoc_url="/redoc",
lifespan=lifespan,
)
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=CORS_ORIGINS,
allow_credentials=CORS_CREDENTIALS,
allow_methods=CORS_METHODS,
allow_headers=CORS_HEADERS,
)
# Include routers
try:
from routes.audio import router as chatbot_router
app.include_router(chatbot_router, prefix="/api/v1", tags=["Voice Chatbot"])
logger.info("βœ“ Chatbot routes registered")
except Exception as e:
logger.warning(f"⚠️ Chatbot routes failed to load: {e}")
try:
from routes.audio import router as audio_router
app.include_router(audio_router, prefix="/audio", tags=["Audio Processing"])
logger.info("βœ“ Audio routes registered")
except Exception as e:
logger.warning(f"⚠️ Audio routes failed to load: {e}")
try:
from routes import api_router
app.include_router(api_router)
logger.info("βœ“ API routes registered")
except Exception as e:
logger.warning(f"⚠️ API routes failed to load: {e}")
# Health check endpoints
@app.get("/")
async def root():
"""Root endpoint"""
return {
"message": "NLP Debater API with Voice Chatbot",
"status": "healthy",
"version": API_VERSION,
"docs": "/docs",
"endpoints": {
"voice_chatbot": "/api/v1/chat/message",
"audio_processing": "/docs#/Audio%20Processing",
"health": "/health",
"models-status": "/models-status"
}
}
@app.get("/health")
async def health_check():
"""Simple health check"""
return {"status": "healthy", "message": "API is running"}
@app.get("/models-status")
async def models_status():
"""Check which models are loaded"""
status = {
"stt_model": "unknown",
"tts_engine": "gtts (free)",
"chatbot_model": "unknown",
"stance_model": "unknown",
"kpa_model": "unknown"
}
    try:
        from services.stt_service import STTService
        stt_service = STTService()
        # A fresh instance is created here, so "loaded" is reported only if the
        # service shares state across instances (e.g. a module-level cache).
        status["stt_model"] = "loaded" if getattr(stt_service, 'initialized', False) else "not loaded"
    except Exception:
        status["stt_model"] = "error"
    try:
        from services.chatbot_service import ChatbotService
        chatbot_service = ChatbotService()
        status["chatbot_model"] = "loaded" if getattr(chatbot_service, 'initialized', False) else "not loaded"
    except Exception:
        status["chatbot_model"] = "error"
return status
@app.get("/check-ffmpeg")
async def check_ffmpeg():
"""Check if ffmpeg is installed"""
try:
result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
if result.returncode == 0:
version = result.stdout.split('\n')[0]
return {"status": "available", "version": version}
else:
return {"status": "error", "error": result.stderr}
except FileNotFoundError:
return {"status": "not found", "error": "ffmpeg is not installed"}
if __name__ == "__main__":
logger.info(f"πŸš€ Starting server on {HOST}:{PORT}")
logger.info(f"πŸ“š Documentation: http://{HOST}:{PORT}/docs")
uvicorn.run(
"main:app",
host=HOST,
port=PORT,
reload=RELOAD,
log_level="info"
)