mirror of
https://github.com/jmagar/unraid-mcp.git
synced 2026-03-01 16:04:24 -08:00
Addresses issues found by 4 parallel review agents (code-reviewer,
silent-failure-hunter, type-design-analyzer, pr-test-analyzer).
Source fixes:
- core/utils.py: add public safe_display_url() (moved from tools/health.py)
- core/client.py: rename _redact_sensitive → redact_sensitive (public API)
- core/types.py: add SubscriptionData.__post_init__ for tz-aware datetime
enforcement; remove 6 unused type aliases (SystemHealth, APIResponse, etc.)
- subscriptions/manager.py: add exc_info=True to both except-Exception blocks;
add except ValueError break-on-config-error before retry loop; import
redact_sensitive by new public name
- subscriptions/resources.py: re-raise in autostart_subscriptions() so
ensure_subscriptions_started() doesn't permanently set _subscriptions_started
- subscriptions/diagnostics.py: except ToolError: raise before broad except;
use safe_display_url() instead of raw URL slice
- tools/health.py: move _safe_display_url to core/utils; add exc_info=True;
raise ToolError (not return dict) on ImportError
- tools/info.py: use get_args(INFO_ACTIONS) instead of INFO_ACTIONS.__args__
- tools/{array,docker,keys,notifications,rclone,storage,virtualization}.py:
add Literal-vs-ALL_ACTIONS sync check at import time
Test fixes:
- test_health.py: import safe_display_url from core.utils; update
test_diagnose_import_error_internal to expect ToolError (not error dict)
- test_storage.py: add 3 safe_get tests for zero/False/empty-string values
- test_subscription_manager.py: add TestCapLogContentSingleMassiveLine (2 tests)
- test_client.py: rename _redact_sensitive → redact_sensitive; add tests for
new sensitive keys and is_cacheable explicit-keyword form
252 lines
8.0 KiB
Python
252 lines
8.0 KiB
Python
"""Logging configuration for Unraid MCP Server.
|
|
|
|
This module sets up structured logging with Rich console and overwrite file handlers
|
|
that cap at 10MB and start over (no rotation) for consistent use across all modules.
|
|
"""
|
|
|
|
import logging
|
|
from pathlib import Path
|
|
|
|
from rich.console import Console
|
|
from rich.logging import RichHandler
|
|
|
|
|
|
try:
|
|
from fastmcp.utilities.logging import get_logger as get_fastmcp_logger
|
|
|
|
FASTMCP_AVAILABLE = True
|
|
except ImportError:
|
|
FASTMCP_AVAILABLE = False
|
|
|
|
from .settings import LOG_FILE_PATH, LOG_LEVEL_STR
|
|
|
|
|
|
# Global Rich console for consistent formatting.
# stderr=True keeps log output off stdout, which may carry MCP protocol traffic.
console = Console(stderr=True)
|
|
|
|
|
|
class OverwriteFileHandler(logging.FileHandler):
    """Custom file handler that overwrites the log file when it reaches max size."""

    def __init__(self, filename, max_bytes=10 * 1024 * 1024, mode="a", encoding=None, delay=False):
        """Initialize the handler.

        Args:
            filename: Path to the log file
            max_bytes: Maximum file size in bytes before overwriting (default: 10MB)
            mode: File open mode
            encoding: File encoding
            delay: Whether to delay file opening

        """
        self.max_bytes = max_bytes
        # Size checks are amortized: performed on the first emit and then
        # every _check_interval emits, to avoid a stat() per record.
        self._emit_count = 0
        self._check_interval = 100
        super().__init__(filename, mode, encoding, delay)

    def emit(self, record):
        """Emit a record, checking file size periodically and overwriting if needed."""
        self._emit_count += 1
        if (
            (self._emit_count == 1 or self._emit_count % self._check_interval == 0)
            and self.stream
            and hasattr(self.stream, "name")
        ):
            try:
                self._reset_if_oversized()
            except OSError as e:
                import sys

                print(
                    f"WARNING: Log file size check failed: {e}. Continuing without rotation.",
                    file=sys.stderr,
                )

        # Emit the original record
        super().emit(record)

    def _reset_if_oversized(self):
        """Delete and reopen the log file if it has grown past max_bytes.

        Raises:
            OSError: If any filesystem operation (stat/unlink/reopen) fails.

        """
        base_path = Path(self.baseFilename)
        if not base_path.exists() or base_path.stat().st_size < self.max_bytes:
            return

        # Close current stream before removing the file underneath it
        if self.stream:
            self.stream.close()

        # Remove the old file and start fresh
        if base_path.exists():
            base_path.unlink()

        # Reopen: mode "a" on a now-missing file starts an empty file
        self.stream = self._open()

        # Log a marker that the file was reset. Report the configured
        # max_bytes instead of a hard-coded "10MB" so the message stays
        # accurate when a caller passes a non-default limit.
        reset_record = logging.LogRecord(
            name="UnraidMCPServer.Logging",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg=f"=== LOG FILE RESET ({self.max_bytes} byte limit reached) ===",
            args=(),
            exc_info=None,
        )
        # super().emit bypasses our size check, matching the original path.
        super().emit(reset_record)
|
|
|
|
|
def _create_shared_file_handler() -> OverwriteFileHandler:
    """Create the single shared file handler for all loggers.

    Returns:
        Configured OverwriteFileHandler instance

    """
    level = getattr(logging, LOG_LEVEL_STR, logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s"
    )
    file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8")
    file_handler.setLevel(level)
    file_handler.setFormatter(formatter)
    return file_handler
|
|
|
|
|
|
# Single shared file handler — all loggers reuse this instance to avoid
# race conditions from multiple OverwriteFileHandler instances on the same file.
_shared_file_handler = _create_shared_file_handler()
|
|
|
|
|
|
def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
    """Set up and configure the logger with console and file handlers.

    Args:
        name: Logger name (defaults to UnraidMCPServer)

    Returns:
        Configured logger instance

    """
    level = getattr(logging, LOG_LEVEL_STR, logging.INFO)

    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Keep output out of the root logger so handlers aren't duplicated
    logger.propagate = False
    # Drop any handlers left over from a previous setup call
    logger.handlers.clear()

    # Rich console handler for readable terminal output
    rich_handler = RichHandler(
        console=console,
        show_time=True,
        show_level=True,
        show_path=False,
        rich_tracebacks=True,
        tracebacks_show_locals=False,
    )
    rich_handler.setLevel(level)

    # Console handler plus the process-wide shared file handler
    for handler in (rich_handler, _shared_file_handler):
        logger.addHandler(handler)

    return logger
|
|
|
|
|
|
def configure_fastmcp_logger_with_rich() -> logging.Logger | None:
    """Configure FastMCP logger to use Rich formatting with Nordic colors.

    Also reconfigures the root logger with an identical Rich console handler
    and the shared file handler, so logs from other libraries are captured.

    Returns:
        The configured FastMCP logger, or None when FastMCP is unavailable.

    """
    if not FASTMCP_AVAILABLE:
        return None

    # Get numeric log level
    numeric_log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO)

    def _rich_console_handler() -> RichHandler:
        # The FastMCP logger and the root logger each get an identically
        # configured console handler; built here once to keep them in sync
        # (previously two duplicated construction blocks).
        handler = RichHandler(
            console=console,
            show_time=True,
            show_level=True,
            show_path=False,
            rich_tracebacks=True,
            tracebacks_show_locals=False,
            markup=True,
        )
        handler.setLevel(numeric_log_level)
        return handler

    # Get the FastMCP logger and replace whatever handlers it came with
    fastmcp_logger = get_fastmcp_logger("UnraidMCPServer")
    fastmcp_logger.handlers.clear()
    fastmcp_logger.propagate = False
    fastmcp_logger.addHandler(_rich_console_handler())
    # Reuse the shared file handler (single instance for the whole process)
    fastmcp_logger.addHandler(_shared_file_handler)
    fastmcp_logger.setLevel(numeric_log_level)

    # Also configure the root logger to catch any other logs
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    root_logger.propagate = False
    root_logger.addHandler(_rich_console_handler())
    # Reuse the shared file handler for root logger
    root_logger.addHandler(_shared_file_handler)
    root_logger.setLevel(numeric_log_level)

    return fastmcp_logger
|
|
|
|
|
|
def log_configuration_status(logger: logging.Logger) -> None:
    """Log configuration status at startup.

    Args:
        logger: Logger instance to use for logging

    """
    from .settings import get_config_summary

    logger.info(f"Logging initialized (console and file: {LOG_FILE_PATH}).")

    config = get_config_summary()

    # API URL / key presence (the key value itself is never logged)
    if config["api_url_configured"]:
        logger.info(f"UNRAID_API_URL loaded: {config['api_url_preview']}")
    else:
        logger.warning("UNRAID_API_URL not found in environment or .env file.")

    if config["api_key_configured"]:
        logger.info("UNRAID_API_KEY loaded: ****")  # Don't log the key itself
    else:
        logger.warning("UNRAID_API_KEY not found in environment or .env file.")

    # Server / transport settings, logged uniformly
    for env_name, key in (
        ("UNRAID_MCP_PORT", "server_port"),
        ("UNRAID_MCP_HOST", "server_host"),
        ("UNRAID_MCP_TRANSPORT", "transport"),
        ("UNRAID_MCP_LOG_LEVEL", "log_level"),
    ):
        logger.info(f"{env_name} set to: {config[key]}")

    if not config["config_valid"]:
        logger.error(f"Missing required configuration: {config['missing_config']}")
|
|
|
|
|
|
# Global logger instance - modules can import this directly
if FASTMCP_AVAILABLE:
    # Use FastMCP logger with Rich formatting
    _fastmcp_logger = configure_fastmcp_logger_with_rich()
    # configure_fastmcp_logger_with_rich() returns None only when FastMCP is
    # unavailable, but guard anyway so `logger` is always a real Logger.
    logger = _fastmcp_logger if _fastmcp_logger is not None else setup_logger()
else:
    # Fallback to our custom logger if FastMCP is not available
    logger = setup_logger()