malek-messaoudii committed on
Commit
5fb4696
·
1 Parent(s): 300cb09

Refactor ffmpeg installation and enhance logging; add health check endpoints for API status

Browse files
Files changed (1) hide show
  1. main.py +97 -9
main.py CHANGED
@@ -2,24 +2,63 @@
2
 
3
  import sys
4
  from pathlib import Path
 
 
 
 
 
 
 
5
 
6
  # Add the app directory to Python path to ensure imports work
7
  app_dir = Path(__file__).parent
8
  if str(app_dir) not in sys.path:
9
  sys.path.insert(0, str(app_dir))
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  from contextlib import asynccontextmanager
12
  from fastapi import FastAPI
13
  from fastapi.middleware.cors import CORSMiddleware
14
  import uvicorn
15
- import logging
16
 
17
  from config import (
18
  API_TITLE,
19
  API_DESCRIPTION,
20
  API_VERSION,
21
- STANCE_MODEL_ID,
22
- LABEL_MODEL_ID,
23
  HUGGINGFACE_API_KEY,
24
  HUGGINGFACE_STANCE_MODEL_ID,
25
  HUGGINGFACE_LABEL_MODEL_ID,
@@ -34,10 +73,6 @@ from config import (
34
  from services import stance_model_manager, kpa_model_manager
35
  from routes import api_router
36
 
37
- # Configure logging
38
- logging.basicConfig(level=logging.INFO)
39
- logger = logging.getLogger(__name__)
40
-
41
 
42
  @asynccontextmanager
43
  async def lifespan(app: FastAPI):
@@ -49,6 +84,7 @@ async def lifespan(app: FastAPI):
49
  try:
50
  logger.info(f"Loading stance model from Hugging Face: {HUGGINGFACE_STANCE_MODEL_ID}")
51
  stance_model_manager.load_model(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
 
52
  except Exception as e:
53
  logger.error(f"βœ— Failed to load stance model: {str(e)}")
54
  logger.error("⚠️ Stance detection endpoints will not work!")
@@ -57,16 +93,31 @@ async def lifespan(app: FastAPI):
57
  try:
58
  logger.info(f"Loading KPA model from Hugging Face: {HUGGINGFACE_LABEL_MODEL_ID}")
59
  kpa_model_manager.load_model(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
 
60
  except Exception as e:
61
  logger.error(f"βœ— Failed to load KPA model: {str(e)}")
62
  logger.error("⚠️ KPA/Label prediction endpoints will not work!")
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  logger.info("βœ“ API startup complete")
65
- logger.info("https://nlp-debater-project-fastapi-backend-models.hf.space/docs")
66
 
67
  yield # Application runs here
68
 
69
  # Shutdown: Cleanup (if needed)
 
70
  # Currently no cleanup needed, but you can add it here if necessary
71
 
72
  # Create FastAPI application
@@ -91,12 +142,49 @@ app.add_middleware(
91
  # Include API routes
92
  app.include_router(api_router)
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
95
  if __name__ == "__main__":
96
  # Run the API server
97
  # Access at: http://localhost:8000
98
  # API docs at: http://localhost:8000/docs
99
 
 
 
 
100
  # Run the API server
101
  uvicorn.run(
102
  "main:app",
@@ -104,4 +192,4 @@ if __name__ == "__main__":
104
  port=PORT,
105
  reload=RELOAD,
106
  log_level="info"
107
- )
 
2
 
3
  import sys
4
  from pathlib import Path
5
+ import os
6
+ import subprocess
7
+ import logging
8
+
9
+ # Configure logging first
10
+ logging.basicConfig(level=logging.INFO)
11
+ logger = logging.getLogger(__name__)
12
 
13
  # Add the app directory to Python path to ensure imports work
14
  app_dir = Path(__file__).parent
15
  if str(app_dir) not in sys.path:
16
  sys.path.insert(0, str(app_dir))
17
 
18
+ def install_ffmpeg():
19
+ """Install ffmpeg on Hugging Face Spaces"""
20
+ try:
21
+ # Check if ffmpeg is already installed
22
+ result = subprocess.run(["which", "ffmpeg"], capture_output=True, text=True)
23
+ if result.returncode == 0:
24
+ logger.info("βœ“ ffmpeg is already installed")
25
+ # Verify it works
26
+ version_result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
27
+ if version_result.returncode == 0:
28
+ logger.info(f"βœ“ ffmpeg version: {version_result.stdout.split()[2]}")
29
+ return True
30
+
31
+ logger.info("Installing ffmpeg...")
32
+ # Update package list and install ffmpeg
33
+ subprocess.run(["apt-get", "update"], check=True, capture_output=True)
34
+ subprocess.run(["apt-get", "install", "-y", "ffmpeg"], check=True, capture_output=True)
35
+
36
+ # Verify installation
37
+ verify_result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
38
+ if verify_result.returncode == 0:
39
+ logger.info(f"βœ“ ffmpeg installed successfully: {verify_result.stdout.split()[2]}")
40
+ return True
41
+ else:
42
+ logger.error("βœ— ffmpeg installation verification failed")
43
+ return False
44
+
45
+ except Exception as e:
46
+ logger.error(f"βœ— Failed to install ffmpeg: {e}")
47
+ return False
48
+
49
+ # Install ffmpeg before importing other modules
50
+ logger.info("===== Checking system dependencies =====")
51
+ install_ffmpeg()
52
+
53
  from contextlib import asynccontextmanager
54
  from fastapi import FastAPI
55
  from fastapi.middleware.cors import CORSMiddleware
56
  import uvicorn
 
57
 
58
  from config import (
59
  API_TITLE,
60
  API_DESCRIPTION,
61
  API_VERSION,
 
 
62
  HUGGINGFACE_API_KEY,
63
  HUGGINGFACE_STANCE_MODEL_ID,
64
  HUGGINGFACE_LABEL_MODEL_ID,
 
73
  from services import stance_model_manager, kpa_model_manager
74
  from routes import api_router
75
 
 
 
 
 
76
 
77
  @asynccontextmanager
78
  async def lifespan(app: FastAPI):
 
84
  try:
85
  logger.info(f"Loading stance model from Hugging Face: {HUGGINGFACE_STANCE_MODEL_ID}")
86
  stance_model_manager.load_model(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
87
+ logger.info("βœ“ Stance model loaded successfully")
88
  except Exception as e:
89
  logger.error(f"βœ— Failed to load stance model: {str(e)}")
90
  logger.error("⚠️ Stance detection endpoints will not work!")
 
93
  try:
94
  logger.info(f"Loading KPA model from Hugging Face: {HUGGINGFACE_LABEL_MODEL_ID}")
95
  kpa_model_manager.load_model(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
96
+ logger.info("βœ“ KPA model loaded successfully")
97
  except Exception as e:
98
  logger.error(f"βœ— Failed to load KPA model: {str(e)}")
99
  logger.error("⚠️ KPA/Label prediction endpoints will not work!")
100
 
101
+ # Load STT and Chatbot models
102
+ try:
103
+ from services.stt_service import load_stt_model
104
+ from services.chatbot_service import load_chatbot_model
105
+
106
+ logger.info("Loading STT and Chatbot models...")
107
+ load_stt_model()
108
+ load_chatbot_model()
109
+ logger.info("βœ“ STT and Chatbot models loaded successfully")
110
+ except Exception as e:
111
+ logger.error(f"βœ— Failed to load STT/Chatbot models: {str(e)}")
112
+ logger.error("⚠️ Audio endpoints may not work properly!")
113
+
114
  logger.info("βœ“ API startup complete")
115
+ logger.info(f"πŸ“š API Documentation: https://nlp-debater-project-fastapi-backend-models.hf.space/docs")
116
 
117
  yield # Application runs here
118
 
119
  # Shutdown: Cleanup (if needed)
120
+ logger.info("Shutting down API...")
121
  # Currently no cleanup needed, but you can add it here if necessary
122
 
123
  # Create FastAPI application
 
142
  # Include API routes
143
  app.include_router(api_router)
144
 
145
+ # Health check endpoint
146
+ @app.get("/")
147
+ async def root():
148
+ """Health check endpoint"""
149
+ return {
150
+ "message": "NLP Debater API is running!",
151
+ "status": "healthy",
152
+ "docs": "/docs"
153
+ }
154
+
155
+ @app.get("/health")
156
+ async def health_check():
157
+ """Health check endpoint"""
158
+ return {
159
+ "status": "healthy",
160
+ "message": "API is running successfully"
161
+ }
162
+
163
+ @app.get("/check-ffmpeg")
164
+ async def check_ffmpeg():
165
+ """Check if ffmpeg is available"""
166
+ try:
167
+ result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
168
+ if result.returncode == 0:
169
+ return {
170
+ "status": "available",
171
+ "version": result.stdout.split('\n')[0],
172
+ "message": "ffmpeg is ready for audio processing"
173
+ }
174
+ else:
175
+ return {"status": "error", "error": result.stderr}
176
+ except FileNotFoundError:
177
+ return {"status": "ffmpeg not found"}
178
+
179
 
180
  if __name__ == "__main__":
181
  # Run the API server
182
  # Access at: http://localhost:8000
183
  # API docs at: http://localhost:8000/docs
184
 
185
+ logger.info(f"πŸš€ Starting server on {HOST}:{PORT}")
186
+ logger.info(f"πŸ“š Documentation available at: http://{HOST}:{PORT}/docs")
187
+
188
  # Run the API server
189
  uvicorn.run(
190
  "main:app",
 
192
  port=PORT,
193
  reload=RELOAD,
194
  log_level="info"
195
+ )