"""Logger module for centralized logging across the RAG system.
Provides a unified logging interface for all components with:
- File logging with configurable output paths
- Console logging with level filtering
- Timezone support (CST - Central Standard Time)
- Instance caching to prevent duplicate loggers
"""
import logging
from datetime import datetime
from typing import Literal, Optional
from zoneinfo import ZoneInfo

# Log priority levels (highest to lowest):
# CRITICAL > ERROR > WARNING > INFO > DEBUG > NOTSET

# Global cache for logger instances to avoid creating duplicates:
_logger_instances: dict[str, logging.Logger] = {}
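
# The module docstring promises CST timestamps, but logging.Formatter renders
# local time by default. The converter below is a minimal sketch that feeds
# the formatter Central Time instead; the zone name "America/Chicago" is an
# assumption standing in for CST.
_LOG_TZ = ZoneInfo("America/Chicago")


def _central_time_converter(timestamp):
    """Convert a log record's epoch timestamp to a Central Time struct_time."""
    return datetime.fromtimestamp(timestamp, tz=_LOG_TZ).timetuple()
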

def get_logger(
    name: str = "unset",
    log_to_console: bool = False,
    log_to_file: bool = True,
    log_file: Optional[str] = "app.log",
) -> logging.Logger:
"""Get or create a logger instance with the given name and configuration.
Uses instance caching to ensure only one logger per name exists, preventing
duplicate logs from being written.
Args:
name (str): Name identifier for the logger (shown in log messages).
log_to_console (bool): Whether to output logs to console. Defaults to False.
log_to_file (bool): Whether to output logs to file. Defaults to True.
log_file (Optional[str]): Path to the log file. If None, file logging is disabled.
Returns:
logging.Logger: Configured logger instance for use in code.
Example:
>>> logger = get_logger("my_module", log_to_console=True)
>>> logger.info("Application started")
"""
    # Return the cached logger if it already exists
    if name in _logger_instances:
        return _logger_instances[name]

    # Create a new logger instance
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)  # Capture all log levels
    logger.propagate = False  # Prevent duplicate logs via the root logger

    # Format logs with timestamp (Central Time), level, logger name, and message
    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d [%(levelname)-8s] [%(name)s] %(message)s',
        datefmt='%d-%m-%y %H:%M:%S'  # Date format only; the timezone comes from the converter
    )
    formatter.converter = _central_time_converter  # Render timestamps in Central Time
    # File handler: writes logs to the specified file
    if log_to_file and log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.DEBUG)  # Log all levels to the file
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    # Console handler: writes logs to stderr (StreamHandler's default stream)
    if log_to_console:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.WARNING)  # Only warnings and above on the console
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    # Log initialization details
    logger.info(
        f"Logger initialized. [File: '{log_file if log_to_file else 'Disabled'}', "
        f"Console: '{'Enabled' if log_to_console else 'Disabled'}']"
    )

    # Cache the logger for future requests
    _logger_instances[name] = logger
    return logger

def log_message(
    logger: logging.Logger,
    message: str,
    level: Literal["debug", "info", "warning", "error", "critical"] = "info",
) -> None:
    """Utility function to log messages at various severity levels.

    This is a convenience wrapper around logger.debug(), logger.info(), etc.

    Args:
        logger (logging.Logger): The logger instance to use.
        message (str): The message text to log.
        level (Literal): The logging level to use. One of: debug, info,
            warning, error, critical. Defaults to 'info'.

    Example:
        >>> log_message(logger, "Database connection failed", "error")
    """
    # Dynamically call the appropriate logging method
    getattr(logger, level)(message)

# Example usage:
if __name__ == "__main__":
    logger = get_logger(name="Test", log_to_console=True, log_to_file=True)
    logger.debug("This is a debug message")
    logger.info("This is an info message")
    logger.warning("This is a warning message")
    logger.error("This is an error message")
    logger.critical("This is a critical message")
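    # Repeated calls with the same name return the cached instance, so
    # handlers are never duplicated; this check just demonstrates the cache.
    assert get_logger(name="Test") is logger

    # Route a message through the log_message convenience wrapper
    log_message(logger, "This message went through log_message", "error")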