forked from HomeLab/unraid-mcp
lintfree
@@ -1,7 +1,7 @@
 """Unraid MCP Server Package.

 A modular MCP (Model Context Protocol) server that provides tools to interact
 with an Unraid server's GraphQL API.
 """

 __version__ = "0.1.0"
@@ -1 +1 @@
 """Configuration management for Unraid MCP Server."""
@@ -5,16 +5,16 @@ that can be used consistently across all modules and development scripts.
 """

 import logging
 import sys
-from logging.handlers import RotatingFileHandler
 from datetime import datetime
+from logging.handlers import RotatingFileHandler

 import pytz
+from rich.align import Align
 from rich.console import Console
 from rich.logging import RichHandler
-from rich.text import Text
 from rich.panel import Panel
-from rich.align import Align
 from rich.rule import Rule
+from rich.text import Text

 try:
     from fastmcp.utilities.logging import get_logger as get_fastmcp_logger
@@ -22,7 +22,7 @@ try:
 except ImportError:
     FASTMCP_AVAILABLE = False

-from .settings import LOG_LEVEL_STR, LOG_FILE_PATH
+from .settings import LOG_FILE_PATH, LOG_LEVEL_STR

 # Global Rich console for consistent formatting
 console = Console(stderr=True, force_terminal=True)
@@ -30,24 +30,24 @@ console = Console(stderr=True, force_terminal=True)

 def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
     """Set up and configure the logger with console and file handlers.

     Args:
         name: Logger name (defaults to UnraidMCPServer)

     Returns:
         Configured logger instance
     """
     # Get numeric log level
     numeric_log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO)

     # Define the logger
     logger = logging.getLogger(name)
     logger.setLevel(numeric_log_level)
     logger.propagate = False  # Prevent propagation to the root logger, which would duplicate output

     # Clear any existing handlers
     logger.handlers.clear()

     # Rich Console Handler for beautiful output
     console_handler = RichHandler(
         console=console,
@@ -59,13 +59,13 @@ def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
     )
     console_handler.setLevel(numeric_log_level)
     logger.addHandler(console_handler)

     # File Handler with Rotation
     # Rotate logs at 5MB, keep 3 backup logs
     file_handler = RotatingFileHandler(
         LOG_FILE_PATH,
         maxBytes=5*1024*1024,
         backupCount=3,
         encoding='utf-8'
     )
     file_handler.setLevel(numeric_log_level)
@@ -74,25 +74,25 @@ def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
     )
     file_handler.setFormatter(file_formatter)
     logger.addHandler(file_handler)

     return logger


-def configure_fastmcp_logger_with_rich():
+def configure_fastmcp_logger_with_rich() -> logging.Logger | None:
     """Configure FastMCP logger to use Rich formatting with Nordic colors."""
     if not FASTMCP_AVAILABLE:
         return None

     # Get numeric log level
     numeric_log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO)

     # Get the FastMCP logger
     fastmcp_logger = get_fastmcp_logger("UnraidMCPServer")

     # Clear existing handlers
     fastmcp_logger.handlers.clear()
     fastmcp_logger.propagate = False

     # Rich Console Handler
     console_handler = RichHandler(
         console=console,
@@ -105,12 +105,12 @@ def configure_fastmcp_logger_with_rich():
     )
     console_handler.setLevel(numeric_log_level)
     fastmcp_logger.addHandler(console_handler)

     # File Handler with Rotation
     file_handler = RotatingFileHandler(
         LOG_FILE_PATH,
         maxBytes=5*1024*1024,
         backupCount=3,
         encoding='utf-8'
     )
     file_handler.setLevel(numeric_log_level)
@@ -119,14 +119,14 @@ def configure_fastmcp_logger_with_rich():
     )
     file_handler.setFormatter(file_formatter)
     fastmcp_logger.addHandler(file_handler)

     fastmcp_logger.setLevel(numeric_log_level)

     # Also configure the root logger to catch any other logs
     root_logger = logging.getLogger()
     root_logger.handlers.clear()
     root_logger.propagate = False

     # Rich Console Handler for root logger
     root_console_handler = RichHandler(
         console=console,
@@ -139,23 +139,23 @@ def configure_fastmcp_logger_with_rich():
     )
     root_console_handler.setLevel(numeric_log_level)
     root_logger.addHandler(root_console_handler)

     # File Handler for root logger
     root_file_handler = RotatingFileHandler(
         LOG_FILE_PATH,
         maxBytes=5*1024*1024,
         backupCount=3,
         encoding='utf-8'
     )
     root_file_handler.setLevel(numeric_log_level)
     root_file_handler.setFormatter(file_formatter)
     root_logger.addHandler(root_file_handler)
     root_logger.setLevel(numeric_log_level)

     return fastmcp_logger


-def setup_uvicorn_logging():
+def setup_uvicorn_logging() -> logging.Logger | None:
     """Configure uvicorn and other third-party loggers to use Rich formatting."""
     # This function is kept for backward compatibility but now delegates to FastMCP
     return configure_fastmcp_logger_with_rich()
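For reference, a RotatingFileHandler configured this way keeps at most four files on disk: the active log plus three rollovers. A minimal, self-contained sketch of the same settings (the file name here is an assumption, not the repo's LOG_FILE_PATH):

    import logging
    from logging.handlers import RotatingFileHandler

    # Rolls to demo.log.1 / .2 / .3 once demo.log reaches ~5 MB; the oldest backup is dropped.
    handler = RotatingFileHandler("demo.log", maxBytes=5 * 1024 * 1024, backupCount=3, encoding="utf-8")
    demo_logger = logging.getLogger("demo")
    demo_logger.addHandler(handler)
    demo_logger.warning("rotation is checked against maxBytes on each write")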
@@ -163,32 +163,32 @@ def setup_uvicorn_logging():

 def log_configuration_status(logger: logging.Logger) -> None:
     """Log configuration status at startup.

     Args:
         logger: Logger instance to use for logging
     """
     from .settings import get_config_summary

     logger.info(f"Logging initialized (console and file: {LOG_FILE_PATH}).")

     config = get_config_summary()

     # Log configuration status
     if config['api_url_configured']:
         logger.info(f"UNRAID_API_URL loaded: {config['api_url_preview']}")
     else:
         logger.warning("UNRAID_API_URL not found in environment or .env file.")

     if config['api_key_configured']:
         logger.info("UNRAID_API_KEY loaded: ****")  # Don't log the key itself
     else:
         logger.warning("UNRAID_API_KEY not found in environment or .env file.")

     logger.info(f"UNRAID_MCP_PORT set to: {config['server_port']}")
     logger.info(f"UNRAID_MCP_HOST set to: {config['server_host']}")
     logger.info(f"UNRAID_MCP_TRANSPORT set to: {config['transport']}")
     logger.info(f"UNRAID_MCP_LOG_LEVEL set to: {config['log_level']}")

     if not config['config_valid']:
         logger.error(f"Missing required configuration: {config['missing_config']}")

@@ -200,7 +200,7 @@ def get_est_timestamp() -> str:
     now = datetime.now(est)
     return now.strftime("%y/%m/%d %H:%M:%S")

-def log_header(title: str):
+def log_header(title: str) -> None:
     """Print a beautiful header panel with Nordic blue styling."""
     panel = Panel(
         Align.center(Text(title, style="bold white")),
@@ -210,11 +210,11 @@ def log_header(title: str):
     )
     console.print(panel)

-def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0):
+def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0) -> None:
     """Log a message with specific level and indentation."""
     timestamp = get_est_timestamp()
     indent_str = " " * indent

     # Enhanced Nordic color scheme with more blues
     level_config = {
         "error": {"color": "#BF616A", "icon": "❌", "style": "bold"},  # Nordic red
@@ -224,20 +224,20 @@ def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0):
         "status": {"color": "#81A1C1", "icon": "🔍", "style": ""},  # Light Nordic blue
         "debug": {"color": "#4C566A", "icon": "🐛", "style": ""},  # Nordic dark gray
     }

     config = level_config.get(level, {"color": "#81A1C1", "icon": "•", "style": ""})  # Default to light Nordic blue

     # Create beautifully formatted text
     text = Text()

     # Timestamp with Nordic blue styling
     text.append(f"[{timestamp}]", style="#81A1C1")  # Light Nordic blue for timestamps
     text.append(" ")

     # Indentation with Nordic blue styling
     if indent > 0:
         text.append(indent_str, style="#81A1C1")

     # Level icon (only for certain levels)
     if level in ["error", "warning", "success"]:
         # Extract emoji from message if it starts with one, to avoid duplication
@@ -246,42 +246,44 @@ def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0):
             pass
         else:
             text.append(f"{config['icon']} ", style=config["color"])

     # Message content
     message_style = f"{config['color']} {config['style']}".strip()
     text.append(message, style=message_style)

     console.print(text)

-def log_separator():
+def log_separator() -> None:
     """Print a beautiful separator line with Nordic blue styling."""
     console.print(Rule(style="#81A1C1"))

 # Convenience functions for different log levels
-def log_error(message: str, indent: int = 0):
+def log_error(message: str, indent: int = 0) -> None:
     log_with_level_and_indent(message, "error", indent)

-def log_warning(message: str, indent: int = 0):
+def log_warning(message: str, indent: int = 0) -> None:
     log_with_level_and_indent(message, "warning", indent)

-def log_success(message: str, indent: int = 0):
+def log_success(message: str, indent: int = 0) -> None:
     log_with_level_and_indent(message, "success", indent)

-def log_info(message: str, indent: int = 0):
+def log_info(message: str, indent: int = 0) -> None:
     log_with_level_and_indent(message, "info", indent)

-def log_status(message: str, indent: int = 0):
+def log_status(message: str, indent: int = 0) -> None:
     log_with_level_and_indent(message, "status", indent)

 # Global logger instance - modules can import this directly
 if FASTMCP_AVAILABLE:
     # Use FastMCP logger with Rich formatting
-    logger = configure_fastmcp_logger_with_rich()
-    if logger is None:
+    _fastmcp_logger = configure_fastmcp_logger_with_rich()
+    if _fastmcp_logger is not None:
+        logger = _fastmcp_logger
+    else:
         # Fallback to our custom logger if FastMCP configuration fails
         logger = setup_logger()
 else:
     # Fallback to our custom logger if FastMCP is not available
     logger = setup_logger()

 # Setup uvicorn logging when module is imported
 setup_uvicorn_logging()
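Because the module ends by binding a concrete `logger` (FastMCP-backed when available, otherwise the custom one), callers can import it directly. A small usage sketch; the absolute import path is assumed from the relative imports above, not stated in this commit:

    from unraid_mcp.config.logging import log_info, log_success, logger

    logger.debug("standard logging API, routed through the Rich handlers")
    log_info("styled status line", indent=1)
    log_success("done")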
@@ -6,7 +6,8 @@ and provides all configuration constants used throughout the application.

 import os
 from pathlib import Path
-from typing import Union
+from typing import Any

 from dotenv import load_dotenv

 # Get the script directory (config module location)
@@ -40,7 +41,7 @@ UNRAID_MCP_TRANSPORT = os.getenv("UNRAID_MCP_TRANSPORT", "streamable-http").lower()
 # SSL Configuration
 raw_verify_ssl = os.getenv("UNRAID_VERIFY_SSL", "true").lower()
 if raw_verify_ssl in ["false", "0", "no"]:
-    UNRAID_VERIFY_SSL: Union[bool, str] = False
+    UNRAID_VERIFY_SSL: bool | str = False
 elif raw_verify_ssl in ["true", "1", "yes"]:
     UNRAID_VERIFY_SSL = True
 else:  # Path to CA bundle
@@ -62,9 +63,9 @@ TIMEOUT_CONFIG = {
 }


-def validate_required_config() -> bool:
+def validate_required_config() -> tuple[bool, list[str]]:
     """Validate that required configuration is present.

     Returns:
         tuple[bool, list[str]]: Whether all required config is present, plus the names of any missing variables.
     """
@@ -72,23 +73,23 @@ def validate_required_config() -> bool:
         ("UNRAID_API_URL", UNRAID_API_URL),
         ("UNRAID_API_KEY", UNRAID_API_KEY)
     ]

     missing = []
     for name, value in required_vars:
         if not value:
             missing.append(name)

     return len(missing) == 0, missing


-def get_config_summary() -> dict:
+def get_config_summary() -> dict[str, Any]:
     """Get a summary of current configuration (safe for logging).

     Returns:
         dict: Configuration summary with sensitive data redacted.
     """
     is_valid, missing = validate_required_config()

     return {
         'api_url_configured': bool(UNRAID_API_URL),
         'api_url_preview': UNRAID_API_URL[:20] + '...' if UNRAID_API_URL else None,
@@ -101,4 +102,4 @@ def get_config_summary() -> dict:
         'log_file': str(LOG_FILE_PATH),
         'config_valid': is_valid,
         'missing_config': missing if not is_valid else None
     }
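The `bool | str` annotation matches what HTTP clients accept for TLS verification: False disables it, True uses the default CA bundle, and a string is treated as a path to a custom bundle. A hedged sketch of typical consumption (the client construction is illustrative, not a line from this commit):

    import httpx

    # UNRAID_VERIFY_SSL: False, True, or "/path/to/ca-bundle.pem"
    client = httpx.AsyncClient(verify=UNRAID_VERIFY_SSL, timeout=30.0)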
@@ -1 +1 @@
 """Core infrastructure components for Unraid MCP Server."""
@@ -81,7 +81,7 @@ async def make_graphql_request(
         "User-Agent": "UnraidMCPServer/0.1.0"  # Custom user-agent
     }

-    payload = {"query": query}
+    payload: dict[str, Any] = {"query": query}
     if variables:
         payload["variables"] = variables

@@ -119,17 +119,18 @@ async def make_graphql_request(
             raise ToolError(f"GraphQL API error: {error_details}")

         logger.debug("GraphQL request successful.")
-        return response_data.get("data", {})  # Return only the data part
+        data = response_data.get("data", {})
+        return data if isinstance(data, dict) else {}  # Ensure we return a dict

     except httpx.HTTPStatusError as e:
         logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
-        raise ToolError(f"HTTP error {e.response.status_code}: {e.response.text}")
+        raise ToolError(f"HTTP error {e.response.status_code}: {e.response.text}") from e
     except httpx.RequestError as e:
         logger.error(f"Request error occurred: {e}")
-        raise ToolError(f"Network connection error: {str(e)}")
+        raise ToolError(f"Network connection error: {str(e)}") from e
     except json.JSONDecodeError as e:
         logger.error(f"Failed to decode JSON response: {e}")
-        raise ToolError(f"Invalid JSON response from Unraid API: {str(e)}")
+        raise ToolError(f"Invalid JSON response from Unraid API: {str(e)}") from e


 def get_timeout_for_operation(operation_type: str = "default") -> httpx.Timeout:
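The repeated `raise ... from e` edits enable explicit exception chaining: the original error is attached as `__cause__`, so tracebacks print both the low-level failure and the ToolError. A standalone illustration:

    def parse_port(raw: str) -> int:
        try:
            return int(raw)
        except ValueError as e:
            # "from e" links the ValueError as the cause; the traceback reads
            # "The above exception was the direct cause of the following exception:"
            raise RuntimeError(f"invalid port: {raw!r}") from e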
@@ -6,13 +6,13 @@ multiple modules for consistent data handling.

 from dataclasses import dataclass
 from datetime import datetime
-from typing import Any, Dict, Optional, Union
+from typing import Any


 @dataclass
 class SubscriptionData:
     """Container for subscription data with metadata."""
-    data: Dict[str, Any]
+    data: dict[str, Any]
     last_updated: datetime
     subscription_type: str

@@ -24,20 +24,20 @@ class SystemHealth:
     issues: list[str]
     warnings: list[str]
     last_checked: datetime
-    component_status: Dict[str, str]
+    component_status: dict[str, str]


 @dataclass
 class APIResponse:
     """Container for standardized API response data."""
     success: bool
-    data: Optional[Dict[str, Any]] = None
-    error: Optional[str] = None
-    metadata: Optional[Dict[str, Any]] = None
+    data: dict[str, Any] | None = None
+    error: str | None = None
+    metadata: dict[str, Any] | None = None


 # Type aliases for common data structures
-ConfigValue = Union[str, int, bool, float, None]
-ConfigDict = Dict[str, ConfigValue]
-GraphQLVariables = Dict[str, Any]
-HealthStatus = Dict[str, Union[str, bool, int, list]]
+ConfigValue = str | int | bool | float | None
+ConfigDict = dict[str, ConfigValue]
+GraphQLVariables = dict[str, Any]
+HealthStatus = dict[str, str | bool | int | list[Any]]
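These rewrites are behavior-preserving modernizations: PEP 585 builtin generics (`dict[...]`) and PEP 604 unions (`X | None`) replace the `typing.Dict`/`Optional`/`Union` spellings on Python 3.10+. A minimal before/after sketch with hypothetical names:

    # before: Optional[Dict[str, int]]
    # after:  dict[str, int] | None
    def lookup(table: dict[str, int] | None, key: str) -> int | None:
        return None if table is None else table.get(key)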
@@ -6,7 +6,7 @@ the modular server implementation from unraid_mcp.server.
 """


-def main():
+def main() -> None:
     """Main entry point for the Unraid MCP Server."""
     try:
         from .server import run_server
@@ -19,4 +19,4 @@ def main():


 if __name__ == "__main__":
     main()
@@ -8,7 +8,7 @@ import sys

 from fastmcp import FastMCP

-from .config.logging import logger, console
+from .config.logging import logger
 from .config.settings import (
     UNRAID_API_KEY,
     UNRAID_API_URL,
@@ -37,10 +37,10 @@ mcp = FastMCP(
 subscription_manager = SubscriptionManager()


-async def autostart_subscriptions():
+async def autostart_subscriptions() -> None:
     """Auto-start all subscriptions marked for auto-start in SubscriptionManager"""
     logger.info("[AUTOSTART] Initiating subscription auto-start process...")

     try:
         # Use the SubscriptionManager auto-start method
         await subscription_manager.auto_start_all_subscriptions()
@@ -49,44 +49,44 @@ async def autostart_subscriptions():
         logger.error(f"[AUTOSTART] Failed during auto-start process: {e}", exc_info=True)


-def register_all_modules():
+def register_all_modules() -> None:
     """Register all tools and resources with the MCP instance."""
     try:
         # Register subscription resources first
         register_subscription_resources(mcp)
         logger.info("📊 Subscription resources registered")

         # Register diagnostic tools
         register_diagnostic_tools(mcp)
         logger.info("🔧 Diagnostic tools registered")

         # Register all tool categories
         register_system_tools(mcp)
         logger.info("🖥️ System tools registered")

         register_docker_tools(mcp)
         logger.info("🐳 Docker tools registered")

         register_vm_tools(mcp)
         logger.info("💻 Virtualization tools registered")

         register_storage_tools(mcp)
         logger.info("💾 Storage tools registered")

         register_health_tools(mcp)
         logger.info("🏥 Health tools registered")

         register_rclone_tools(mcp)
         logger.info("☁️ RClone tools registered")

         logger.info("🎯 All modules registered successfully - Server ready!")

     except Exception as e:
         logger.error(f"❌ Failed to register modules: {e}", exc_info=True)
         raise


-def run_server():
+def run_server() -> None:
     """Run the MCP server with the configured transport."""
     # Log configuration
     if UNRAID_API_URL:
@@ -105,16 +105,16 @@ def run_server():

     # Register all modules
     register_all_modules()

     logger.info(f"🚀 Starting Unraid MCP Server on {UNRAID_MCP_HOST}:{UNRAID_MCP_PORT} using {UNRAID_MCP_TRANSPORT} transport...")

     try:
         # Auto-start subscriptions on first async operation
         if UNRAID_MCP_TRANSPORT == "streamable-http":
             # Use the recommended Streamable HTTP transport
             mcp.run(
                 transport="streamable-http",
                 host=UNRAID_MCP_HOST,
                 port=UNRAID_MCP_PORT,
                 path="/mcp"  # Standard path for MCP
             )
@@ -122,8 +122,8 @@ def run_server():
             # Deprecated SSE transport - log warning
             logger.warning("SSE transport is deprecated and may be removed in a future version. Consider switching to 'streamable-http'.")
             mcp.run(
                 transport="sse",
                 host=UNRAID_MCP_HOST,
                 port=UNRAID_MCP_PORT,
                 path="/mcp"  # Keep custom path for SSE
             )
@@ -138,4 +138,4 @@ def run_server():


 if __name__ == "__main__":
     run_server()
@@ -1 +1 @@
 """WebSocket subscription system for real-time Unraid data."""
@@ -8,84 +8,87 @@ development and debugging purposes.
 import asyncio
 import json
 from datetime import datetime
-from typing import Any, Dict
+from typing import Any

 import websockets
 from fastmcp import FastMCP
+from websockets.legacy.protocol import Subprotocol

 from ..config.logging import logger
-from ..config.settings import UNRAID_API_URL, UNRAID_API_KEY, UNRAID_VERIFY_SSL
+from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL, UNRAID_VERIFY_SSL
 from ..core.exceptions import ToolError
 from .manager import subscription_manager
 from .resources import ensure_subscriptions_started


-def register_diagnostic_tools(mcp: FastMCP):
+def register_diagnostic_tools(mcp: FastMCP) -> None:
     """Register diagnostic tools with the FastMCP instance.

     Args:
         mcp: FastMCP instance to register tools with
     """

     @mcp.tool()
-    async def test_subscription_query(subscription_query: str) -> Dict[str, Any]:
+    async def test_subscription_query(subscription_query: str) -> dict[str, Any]:
         """
         Test a GraphQL subscription query directly to debug schema issues.
         Use this to find working subscription field names and structure.

         Args:
             subscription_query: The GraphQL subscription query to test

         Returns:
             Dict containing test results and response data
         """
         try:
             logger.info(f"[TEST_SUBSCRIPTION] Testing query: {subscription_query}")

             # Build WebSocket URL
+            if not UNRAID_API_URL:
+                raise ToolError("UNRAID_API_URL is not configured")
             ws_url = UNRAID_API_URL.replace("https://", "wss://").replace("http://", "ws://") + "/graphql"

             # Test connection
             async with websockets.connect(
                 ws_url,
-                subprotocols=["graphql-transport-ws", "graphql-ws"],
+                subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
                 ssl=UNRAID_VERIFY_SSL,
                 ping_interval=30,
                 ping_timeout=10
             ) as websocket:

                 # Send connection init
                 await websocket.send(json.dumps({
                     "type": "connection_init",
                     "payload": {"Authorization": f"Bearer {UNRAID_API_KEY}"}
                 }))

                 # Wait for ack
                 response = await websocket.recv()
                 init_response = json.loads(response)

                 if init_response.get("type") != "connection_ack":
                     return {"error": f"Connection failed: {init_response}"}

                 # Send subscription
                 await websocket.send(json.dumps({
                     "id": "test",
                     "type": "start",
                     "payload": {"query": subscription_query}
                 }))

                 # Wait for response with timeout
                 try:
                     response = await asyncio.wait_for(websocket.recv(), timeout=5.0)
                     result = json.loads(response)

                     logger.info(f"[TEST_SUBSCRIPTION] Response: {result}")
                     return {
                         "success": True,
                         "response": result,
                         "query_tested": subscription_query
                     }

                 except asyncio.TimeoutError:
                     return {
                         "success": True,
@@ -93,7 +96,7 @@ def register_diagnostic_tools(mcp: FastMCP):
                         "query_tested": subscription_query,
                         "note": "Connection successful, subscription may be waiting for events"
                     }

         except Exception as e:
             logger.error(f"[TEST_SUBSCRIPTION] Error: {e}", exc_info=True)
             return {
@@ -102,25 +105,28 @@ def register_diagnostic_tools(mcp: FastMCP):
             }

     @mcp.tool()
-    async def diagnose_subscriptions() -> Dict[str, Any]:
+    async def diagnose_subscriptions() -> dict[str, Any]:
         """
         Comprehensive diagnostic tool for subscription system.
         Shows detailed status, connection states, errors, and troubleshooting info.

         Returns:
             Dict containing comprehensive subscription system diagnostics
         """
         # Ensure subscriptions are started before diagnosing
         await ensure_subscriptions_started()

         try:
             logger.info("[DIAGNOSTIC] Running subscription diagnostics...")

             # Get comprehensive status
             status = subscription_manager.get_subscription_status()

-            # Add environment info
-            diagnostic_info = {
+            # Initialize connection issues list with proper type
+            connection_issues: list[dict[str, Any]] = []
+
+            # Add environment info with explicit typing
+            diagnostic_info: dict[str, Any] = {
                 "timestamp": datetime.now().isoformat(),
                 "environment": {
                     "auto_start_enabled": subscription_manager.auto_start_enabled,
@@ -136,10 +142,10 @@ def register_diagnostic_tools(mcp: FastMCP):
                     "active_count": len(subscription_manager.active_subscriptions),
                     "with_data": len(subscription_manager.resource_data),
                     "in_error_state": 0,
-                    "connection_issues": []
+                    "connection_issues": connection_issues
                 }
             }

             # Calculate WebSocket URL
             if UNRAID_API_URL:
                 if UNRAID_API_URL.startswith('https://'):
@@ -151,37 +157,37 @@ def register_diagnostic_tools(mcp: FastMCP):
                 if not ws_url.endswith('/graphql'):
                     ws_url = ws_url.rstrip('/') + '/graphql'
                 diagnostic_info["environment"]["websocket_url"] = ws_url

             # Analyze issues
             for sub_name, sub_status in status.items():
                 runtime = sub_status.get("runtime", {})
                 connection_state = runtime.get("connection_state", "unknown")

                 if connection_state in ["error", "auth_failed", "timeout", "max_retries_exceeded"]:
                     diagnostic_info["summary"]["in_error_state"] += 1

                 if runtime.get("last_error"):
-                    diagnostic_info["summary"]["connection_issues"].append({
+                    connection_issues.append({
                         "subscription": sub_name,
                         "state": connection_state,
                         "error": runtime["last_error"]
                     })

             # Add troubleshooting recommendations
-            recommendations = []
+            recommendations: list[str] = []

             if not diagnostic_info["environment"]["api_key_configured"]:
                 recommendations.append("CRITICAL: No API key configured. Set UNRAID_API_KEY environment variable.")

             if diagnostic_info["summary"]["in_error_state"] > 0:
                 recommendations.append("Some subscriptions are in error state. Check 'connection_issues' for details.")

             if diagnostic_info["summary"]["with_data"] == 0:
                 recommendations.append("No subscriptions have received data yet. Check WebSocket connectivity and authentication.")

             if diagnostic_info["summary"]["active_count"] < diagnostic_info["summary"]["auto_start_count"]:
                 recommendations.append("Not all auto-start subscriptions are active. Check server startup logs.")

             diagnostic_info["troubleshooting"] = {
                 "recommendations": recommendations,
                 "log_commands": [
@@ -191,16 +197,16 @@ def register_diagnostic_tools(mcp: FastMCP):
                 ],
                 "next_steps": [
                     "If authentication fails: Verify API key has correct permissions",
                     "If connection fails: Check network connectivity to Unraid server",
                     "If no data received: Enable DEBUG logging to see detailed protocol messages"
                 ]
             }

             logger.info(f"[DIAGNOSTIC] Completed. Active: {diagnostic_info['summary']['active_count']}, With data: {diagnostic_info['summary']['with_data']}, Errors: {diagnostic_info['summary']['in_error_state']}")
             return diagnostic_info

         except Exception as e:
             logger.error(f"[DIAGNOSTIC] Failed to generate diagnostics: {e}")
-            raise ToolError(f"Failed to generate diagnostics: {str(e)}")
+            raise ToolError(f"Failed to generate diagnostics: {str(e)}") from e

     logger.info("Subscription diagnostic tools registered successfully")
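For orientation, the handshake this diagnostic tool drives follows the GraphQL-over-WebSocket convention: send connection_init, wait for connection_ack, then start the subscription and read frames. A stripped-down sketch under the legacy graphql-ws protocol (endpoint and query are placeholders, not values from this repo):

    import json
    import websockets

    async def probe(ws_url: str, query: str) -> dict:
        async with websockets.connect(ws_url, subprotocols=["graphql-ws"]) as ws:
            await ws.send(json.dumps({"type": "connection_init", "payload": {}}))
            if json.loads(await ws.recv()).get("type") != "connection_ack":
                raise RuntimeError("server did not acknowledge the connection")
            await ws.send(json.dumps({"id": "1", "type": "start", "payload": {"query": query}}))
            return json.loads(await ws.recv())  # first data/error frame
    # e.g. asyncio.run(probe("wss://unraid.example/graphql", "subscription { ... }"))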
@@ -9,31 +9,32 @@ import asyncio
 import json
 import os
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any

 import websockets
+from websockets.legacy.protocol import Subprotocol

 from ..config.logging import logger
-from ..config.settings import UNRAID_API_URL, UNRAID_API_KEY
+from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL
 from ..core.types import SubscriptionData


 class SubscriptionManager:
     """Manages GraphQL subscriptions and converts them to MCP resources."""

-    def __init__(self):
-        self.active_subscriptions: Dict[str, asyncio.Task] = {}
-        self.resource_data: Dict[str, SubscriptionData] = {}
-        self.websocket: Optional[websockets.WebSocketServerProtocol] = None
+    def __init__(self) -> None:
+        self.active_subscriptions: dict[str, asyncio.Task[None]] = {}
+        self.resource_data: dict[str, SubscriptionData] = {}
+        self.websocket: websockets.WebSocketServerProtocol | None = None
         self.subscription_lock = asyncio.Lock()

         # Configuration
         self.auto_start_enabled = os.getenv("UNRAID_AUTO_START_SUBSCRIPTIONS", "true").lower() == "true"
-        self.reconnect_attempts: Dict[str, int] = {}
+        self.reconnect_attempts: dict[str, int] = {}
         self.max_reconnect_attempts = int(os.getenv("UNRAID_MAX_RECONNECT_ATTEMPTS", "10"))
-        self.connection_states: Dict[str, str] = {}  # Track connection state per subscription
-        self.last_error: Dict[str, str] = {}  # Track last error per subscription
+        self.connection_states: dict[str, str] = {}  # Track connection state per subscription
+        self.last_error: dict[str, str] = {}  # Track last error per subscription

         # Define subscription configurations
         self.subscription_configs = {
             "logFileSubscription": {
@@ -51,35 +52,35 @@ class SubscriptionManager:
                 "auto_start": False  # Started manually with path parameter
             }
         }

         logger.info(f"[SUBSCRIPTION_MANAGER] Initialized with auto_start={self.auto_start_enabled}, max_reconnects={self.max_reconnect_attempts}")
         logger.debug(f"[SUBSCRIPTION_MANAGER] Available subscriptions: {list(self.subscription_configs.keys())}")

-    async def auto_start_all_subscriptions(self):
+    async def auto_start_all_subscriptions(self) -> None:
         """Auto-start all subscriptions marked for auto-start."""
         if not self.auto_start_enabled:
             logger.info("[SUBSCRIPTION_MANAGER] Auto-start disabled")
             return

         logger.info("[SUBSCRIPTION_MANAGER] Starting auto-start process...")
         auto_start_count = 0

         for subscription_name, config in self.subscription_configs.items():
             if config.get("auto_start", False):
                 try:
                     logger.info(f"[SUBSCRIPTION_MANAGER] Auto-starting subscription: {subscription_name}")
-                    await self.start_subscription(subscription_name, config["query"])
+                    await self.start_subscription(subscription_name, str(config["query"]))
                     auto_start_count += 1
                 except Exception as e:
                     logger.error(f"[SUBSCRIPTION_MANAGER] Failed to auto-start {subscription_name}: {e}")
                     self.last_error[subscription_name] = str(e)

         logger.info(f"[SUBSCRIPTION_MANAGER] Auto-start completed. Started {auto_start_count} subscriptions")

-    async def start_subscription(self, subscription_name: str, query: str, variables: Dict[str, Any] = None):
+    async def start_subscription(self, subscription_name: str, query: str, variables: dict[str, Any] | None = None) -> None:
         """Start a GraphQL subscription and maintain it as a resource."""
         logger.info(f"[SUBSCRIPTION:{subscription_name}] Starting subscription...")

         if subscription_name in self.active_subscriptions:
             logger.warning(f"[SUBSCRIPTION:{subscription_name}] Subscription already active, skipping")
             return
@@ -87,7 +88,7 @@ class SubscriptionManager:
         # Reset connection tracking
         self.reconnect_attempts[subscription_name] = 0
         self.connection_states[subscription_name] = "starting"

         async with self.subscription_lock:
             try:
                 task = asyncio.create_task(self._subscription_loop(subscription_name, query, variables or {}))
@@ -99,11 +100,11 @@ class SubscriptionManager:
                 self.connection_states[subscription_name] = "failed"
                 self.last_error[subscription_name] = str(e)
                 raise

-    async def stop_subscription(self, subscription_name: str):
+    async def stop_subscription(self, subscription_name: str) -> None:
         """Stop a specific subscription."""
         logger.info(f"[SUBSCRIPTION:{subscription_name}] Stopping subscription...")

         async with self.subscription_lock:
             if subscription_name in self.active_subscriptions:
                 task = self.active_subscriptions[subscription_name]
@@ -117,63 +118,66 @@ class SubscriptionManager:
                 logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription stopped")
             else:
                 logger.warning(f"[SUBSCRIPTION:{subscription_name}] No active subscription to stop")

-    async def _subscription_loop(self, subscription_name: str, query: str, variables: Dict[str, Any]):
+    async def _subscription_loop(self, subscription_name: str, query: str, variables: dict[str, Any] | None) -> None:
         """Main loop for maintaining a GraphQL subscription with comprehensive logging."""
-        retry_delay = 5
+        retry_delay: int | float = 5
         max_retry_delay = 300  # 5 minutes max

         while True:
             attempt = self.reconnect_attempts.get(subscription_name, 0) + 1
             self.reconnect_attempts[subscription_name] = attempt

             logger.info(f"[WEBSOCKET:{subscription_name}] Connection attempt #{attempt} (max: {self.max_reconnect_attempts})")

             if attempt > self.max_reconnect_attempts:
                 logger.error(f"[WEBSOCKET:{subscription_name}] Max reconnection attempts ({self.max_reconnect_attempts}) exceeded, stopping")
                 self.connection_states[subscription_name] = "max_retries_exceeded"
                 break

             try:
                 # Build WebSocket URL with detailed logging
+                if not UNRAID_API_URL:
+                    raise ValueError("UNRAID_API_URL is not configured")
+
                 if UNRAID_API_URL.startswith('https://'):
                     ws_url = 'wss://' + UNRAID_API_URL[len('https://'):]
                 elif UNRAID_API_URL.startswith('http://'):
                     ws_url = 'ws://' + UNRAID_API_URL[len('http://'):]
                 else:
                     ws_url = UNRAID_API_URL

                 if not ws_url.endswith('/graphql'):
                     ws_url = ws_url.rstrip('/') + '/graphql'

                 logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}")
                 logger.debug(f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}")

                 # Connection with timeout
                 connect_timeout = 10
                 logger.debug(f"[WEBSOCKET:{subscription_name}] Connection timeout: {connect_timeout}s")

                 async with websockets.connect(
                     ws_url,
-                    subprotocols=["graphql-transport-ws", "graphql-ws"],
+                    subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
                     ping_interval=20,
                     ping_timeout=10,
                     close_timeout=10
                 ) as websocket:

                     selected_proto = websocket.subprotocol or "none"
                     logger.info(f"[WEBSOCKET:{subscription_name}] Connected! Protocol: {selected_proto}")
                     self.connection_states[subscription_name] = "connected"

                     # Reset retry count on successful connection
                     self.reconnect_attempts[subscription_name] = 0
                     retry_delay = 5  # Reset delay

                     # Initialize GraphQL-WS protocol
                     logger.debug(f"[PROTOCOL:{subscription_name}] Initializing GraphQL-WS protocol...")
                     init_type = "connection_init"
-                    init_payload: Dict[str, Any] = {"type": init_type}
+                    init_payload: dict[str, Any] = {"type": init_type}

                     if UNRAID_API_KEY:
                         logger.debug(f"[AUTH:{subscription_name}] Adding authentication payload")
                         auth_payload = {
@@ -193,16 +197,17 @@ class SubscriptionManager:

                     logger.debug(f"[PROTOCOL:{subscription_name}] Sending connection_init message")
                     await websocket.send(json.dumps(init_payload))

                     # Wait for connection acknowledgment
                     logger.debug(f"[PROTOCOL:{subscription_name}] Waiting for connection_ack...")
                     init_raw = await asyncio.wait_for(websocket.recv(), timeout=30)

                     try:
                         init_data = json.loads(init_raw)
                         logger.debug(f"[PROTOCOL:{subscription_name}] Received init response: {init_data.get('type')}")
                     except json.JSONDecodeError as e:
-                        logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode init response: {init_raw[:200]}...")
+                        init_preview = init_raw[:200] if isinstance(init_raw, str) else init_raw[:200].decode('utf-8', errors='replace')
+                        logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode init response: {init_preview}...")
                         self.last_error[subscription_name] = f"Invalid JSON in init response: {e}"
                         break
@@ -219,7 +224,7 @@ class SubscriptionManager:
                     else:
                         logger.warning(f"[PROTOCOL:{subscription_name}] Unexpected init response: {init_data}")
                         # Continue anyway - some servers send other messages first

                     # Start the subscription
                     logger.debug(f"[SUBSCRIPTION:{subscription_name}] Starting GraphQL subscription...")
                     start_type = "subscribe" if selected_proto == "graphql-transport-ws" else "start"
@@ -231,33 +236,32 @@ class SubscriptionManager:
                             "variables": variables
                         }
                     }

                     logger.debug(f"[SUBSCRIPTION:{subscription_name}] Subscription message type: {start_type}")
                     logger.debug(f"[SUBSCRIPTION:{subscription_name}] Query: {query[:100]}...")
                     logger.debug(f"[SUBSCRIPTION:{subscription_name}] Variables: {variables}")

                     await websocket.send(json.dumps(subscription_message))
                     logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription started successfully")
                     self.connection_states[subscription_name] = "subscribed"

                     # Listen for subscription data
                     message_count = 0
                     last_data_time = datetime.now()

                     async for message in websocket:
                         try:
                             data = json.loads(message)
                             message_count += 1
                             message_type = data.get('type', 'unknown')

                             logger.debug(f"[DATA:{subscription_name}] Message #{message_count}: {message_type}")

                             # Handle different message types
                             expected_data_type = "next" if selected_proto == "graphql-transport-ws" else "data"

                             if data.get("type") == expected_data_type and data.get("id") == subscription_name:
                                 payload = data.get("payload", {})

                                 if payload.get("data"):
                                     logger.info(f"[DATA:{subscription_name}] Received subscription data update")
                                     self.resource_data[subscription_name] = SubscriptionData(
@@ -265,77 +269,78 @@ class SubscriptionManager:
                                         last_updated=datetime.now(),
                                         subscription_type=subscription_name
                                     )
                                     last_data_time = datetime.now()
                                     logger.debug(f"[RESOURCE:{subscription_name}] Resource data updated successfully")
                                 elif payload.get("errors"):
                                     logger.error(f"[DATA:{subscription_name}] GraphQL errors in response: {payload['errors']}")
                                     self.last_error[subscription_name] = f"GraphQL errors: {payload['errors']}"
                                 else:
                                     logger.warning(f"[DATA:{subscription_name}] Empty or invalid data payload: {payload}")

                             elif data.get("type") == "ping":
                                 logger.debug(f"[PROTOCOL:{subscription_name}] Received ping, sending pong")
                                 await websocket.send(json.dumps({"type": "pong"}))

                             elif data.get("type") == "error":
                                 error_payload = data.get('payload', {})
                                 logger.error(f"[SUBSCRIPTION:{subscription_name}] Subscription error: {error_payload}")
                                 self.last_error[subscription_name] = f"Subscription error: {error_payload}"
                                 self.connection_states[subscription_name] = "error"

                             elif data.get("type") == "complete":
                                 logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription completed by server")
                                 self.connection_states[subscription_name] = "completed"
                                 break

                             elif data.get("type") in ["ka", "ping", "pong"]:
                                 logger.debug(f"[PROTOCOL:{subscription_name}] Keepalive message: {message_type}")

                             else:
                                 logger.debug(f"[PROTOCOL:{subscription_name}] Unhandled message type: {message_type}")

                         except json.JSONDecodeError as e:
-                            logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode message: {message[:200]}...")
+                            msg_preview = message[:200] if isinstance(message, str) else message[:200].decode('utf-8', errors='replace')
+                            logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode message: {msg_preview}...")
                             logger.error(f"[PROTOCOL:{subscription_name}] JSON decode error: {e}")
                         except Exception as e:
                             logger.error(f"[DATA:{subscription_name}] Error processing message: {e}")
-                            logger.debug(f"[DATA:{subscription_name}] Raw message: {message[:200]}...")
+                            msg_preview = message[:200] if isinstance(message, str) else message[:200].decode('utf-8', errors='replace')
+                            logger.debug(f"[DATA:{subscription_name}] Raw message: {msg_preview}...")

             except asyncio.TimeoutError:
                 error_msg = "Connection or authentication timeout"
                 logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}")
                 self.last_error[subscription_name] = error_msg
                 self.connection_states[subscription_name] = "timeout"

             except websockets.exceptions.ConnectionClosed as e:
                 error_msg = f"WebSocket connection closed: {e}"
                 logger.warning(f"[WEBSOCKET:{subscription_name}] {error_msg}")
                 self.last_error[subscription_name] = error_msg
                 self.connection_states[subscription_name] = "disconnected"

             except websockets.exceptions.InvalidURI as e:
                 error_msg = f"Invalid WebSocket URI: {e}"
                 logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}")
                 self.last_error[subscription_name] = error_msg
                 self.connection_states[subscription_name] = "invalid_uri"
                 break  # Don't retry on invalid URI

             except Exception as e:
                 error_msg = f"Unexpected error: {e}"
                 logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}")
                 self.last_error[subscription_name] = error_msg
                 self.connection_states[subscription_name] = "error"

             # Calculate backoff delay
             retry_delay = min(retry_delay * 1.5, max_retry_delay)
             logger.info(f"[WEBSOCKET:{subscription_name}] Reconnecting in {retry_delay:.1f} seconds...")
             self.connection_states[subscription_name] = "reconnecting"
             await asyncio.sleep(retry_delay)

-    def get_resource_data(self, resource_name: str) -> Optional[Dict[str, Any]]:
+    def get_resource_data(self, resource_name: str) -> dict[str, Any] | None:
         """Get current resource data with enhanced logging."""
         logger.debug(f"[RESOURCE:{resource_name}] Resource data requested")

         if resource_name in self.resource_data:
             data = self.resource_data[resource_name]
             age_seconds = (datetime.now() - data.last_updated).total_seconds()
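The reconnect loop above implements capped exponential backoff: every failed cycle multiplies the delay by 1.5 until it reaches the 300-second ceiling, and a successful connection resets it to 5. The resulting schedule, sketched in isolation:

    delay: int | float = 5
    for attempt in range(1, 8):
        delay = min(delay * 1.5, 300)
        print(f"attempt {attempt}: retry in {delay:.1f}s")  # 7.5, 11.2, 16.9, 25.3, ...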
@@ -344,17 +349,17 @@ class SubscriptionManager:
         else:
             logger.debug(f"[RESOURCE:{resource_name}] No data available")
             return None

-    def list_active_subscriptions(self) -> List[str]:
+    def list_active_subscriptions(self) -> list[str]:
         """List all active subscriptions."""
         active = list(self.active_subscriptions.keys())
         logger.debug(f"[SUBSCRIPTION_MANAGER] Active subscriptions: {active}")
         return active

-    def get_subscription_status(self) -> Dict[str, Dict[str, Any]]:
+    def get_subscription_status(self) -> dict[str, dict[str, Any]]:
         """Get detailed status of all subscriptions for diagnostics."""
         status = {}

         for sub_name, config in self.subscription_configs.items():
             sub_status = {
                 "config": {
@@ -369,7 +374,7 @@ class SubscriptionManager:
                     "last_error": self.last_error.get(sub_name, None)
                 }
             }

             # Add data info if available
             if sub_name in self.resource_data:
                 data_info = self.resource_data[sub_name]
@@ -381,12 +386,12 @@ class SubscriptionManager:
                 }
             else:
                 sub_status["data"] = {"available": False}

             status[sub_name] = sub_status

         logger.debug(f"[SUBSCRIPTION_MANAGER] Generated status for {len(status)} subscriptions")
         return status


 # Global subscription manager instance
 subscription_manager = SubscriptionManager()
@@ -13,18 +13,17 @@ from fastmcp import FastMCP
 from ..config.logging import logger
 from .manager import subscription_manager

 # Global flag to track subscription startup
 _subscriptions_started = False

-async def ensure_subscriptions_started():
+async def ensure_subscriptions_started() -> None:
     """Ensure subscriptions are started, called from async context."""
     global _subscriptions_started

     if _subscriptions_started:
         return

     logger.info("[STARTUP] First async operation detected, starting subscriptions...")
     try:
         await autostart_subscriptions()
@@ -34,17 +33,17 @@ async def ensure_subscriptions_started():
         logger.error(f"[STARTUP] Failed to start subscriptions: {e}", exc_info=True)


-async def autostart_subscriptions():
+async def autostart_subscriptions() -> None:
     """Auto-start all subscriptions marked for auto-start in SubscriptionManager."""
     logger.info("[AUTOSTART] Initiating subscription auto-start process...")

     try:
         # Use the new SubscriptionManager auto-start method
         await subscription_manager.auto_start_all_subscriptions()
         logger.info("[AUTOSTART] Auto-start process completed successfully")
     except Exception as e:
         logger.error(f"[AUTOSTART] Failed during auto-start process: {e}", exc_info=True)

     # Optional log file subscription
     log_path = os.getenv("UNRAID_AUTOSTART_LOG_PATH")
     if log_path is None:
@@ -53,13 +52,13 @@ async def autostart_subscriptions():
         if Path(default_path).exists():
             log_path = default_path
             logger.info(f"[AUTOSTART] Using default log path: {default_path}")

     if log_path:
         try:
             logger.info(f"[AUTOSTART] Starting log file subscription for: {log_path}")
             config = subscription_manager.subscription_configs.get("logFileSubscription")
             if config:
-                await subscription_manager.start_subscription("logFileSubscription", config["query"], {"path": log_path})
+                await subscription_manager.start_subscription("logFileSubscription", str(config["query"]), {"path": log_path})
                 logger.info(f"[AUTOSTART] Log file subscription started for: {log_path}")
             else:
                 logger.error("[AUTOSTART] logFileSubscription config not found")
@@ -69,13 +68,13 @@ async def autostart_subscriptions():
         logger.info("[AUTOSTART] No log file path configured for auto-start")


-def register_subscription_resources(mcp: FastMCP):
+def register_subscription_resources(mcp: FastMCP) -> None:
     """Register all subscription resources with the FastMCP instance.

     Args:
         mcp: FastMCP instance to register resources with
     """

     @mcp.resource("unraid://logs/stream")
     async def logs_stream_resource() -> str:
         """Real-time log stream data from subscription."""
@@ -88,4 +87,4 @@ def register_subscription_resources(mcp: FastMCP):
             "message": "Subscriptions auto-start on server boot. If this persists, check server logs for WebSocket/auth issues."
         })

     logger.info("Subscription resources registered successfully")
@@ -1 +1 @@
 """MCP tools organized by functional domain."""
@@ -65,7 +65,7 @@ def get_available_container_names(containers: list[dict[str, Any]]) -> list[str]:
     return names


-def register_docker_tools(mcp: FastMCP):
+def register_docker_tools(mcp: FastMCP) -> None:
     """Register all Docker tools with the FastMCP instance.

     Args:
@@ -97,11 +97,12 @@ def register_docker_tools(mcp: FastMCP):
             logger.info("Executing list_docker_containers tool")
             response_data = await make_graphql_request(query)
             if response_data.get("docker"):
-                return response_data["docker"].get("containers", [])
+                containers = response_data["docker"].get("containers", [])
+                return list(containers) if isinstance(containers, list) else []
             return []
         except Exception as e:
             logger.error(f"Error in list_docker_containers: {e}", exc_info=True)
-            raise ToolError(f"Failed to list Docker containers: {str(e)}")
+            raise ToolError(f"Failed to list Docker containers: {str(e)}") from e

     @mcp.tool()
     async def manage_docker_container(container_id: str, action: str) -> dict[str, Any]:
@@ -161,7 +162,7 @@ def register_docker_tools(mcp: FastMCP):
             containers = list_response["docker"].get("containers", [])
             resolved_container = find_container_by_identifier(container_id, containers)
             if resolved_container:
-                actual_container_id = resolved_container.get("id")
+                actual_container_id = str(resolved_container.get("id", ""))
                 logger.info(f"Resolved '{container_id}' to container ID: {actual_container_id}")
             else:
                 available_names = get_available_container_names(containers)
@@ -309,7 +310,7 @@ def register_docker_tools(mcp: FastMCP):

         except Exception as e:
             logger.error(f"Error in manage_docker_container ({action}): {e}", exc_info=True)
-            raise ToolError(f"Failed to {action} Docker container: {str(e)}")
+            raise ToolError(f"Failed to {action} Docker container: {str(e)}") from e

     @mcp.tool()
     async def get_docker_container_details(container_identifier: str) -> dict[str, Any]:
@@ -382,6 +383,6 @@ def register_docker_tools(mcp: FastMCP):

         except Exception as e:
             logger.error(f"Error in get_docker_container_details: {e}", exc_info=True)
-            raise ToolError(f"Failed to retrieve Docker container details: {str(e)}")
+            raise ToolError(f"Failed to retrieve Docker container details: {str(e)}") from e

     logger.info("Docker tools registered successfully")
@@ -7,30 +7,29 @@ notifications, Docker services, and API responsiveness.

 import datetime
 import time
-from typing import Any, Dict
+from typing import Any

 from fastmcp import FastMCP

 from ..config.logging import logger
 from ..config.settings import UNRAID_API_URL, UNRAID_MCP_HOST, UNRAID_MCP_PORT, UNRAID_MCP_TRANSPORT
 from ..core.client import make_graphql_request
 from ..core.exceptions import ToolError


-def register_health_tools(mcp: FastMCP):
+def register_health_tools(mcp: FastMCP) -> None:
     """Register all health tools with the FastMCP instance.

     Args:
         mcp: FastMCP instance to register tools with
     """

     @mcp.tool()
-    async def health_check() -> Dict[str, Any]:
+    async def health_check() -> dict[str, Any]:
         """Returns comprehensive health status of the Unraid MCP server and system for monitoring purposes."""
         start_time = time.time()
         health_status = "healthy"
         issues = []

         try:
             # Enhanced health check with multiple system components
             comprehensive_query = """
@@ -58,10 +57,10 @@ def register_health_tools(mcp: FastMCP):
                }
            }
            """


            response_data = await make_graphql_request(comprehensive_query)
            api_latency = round((time.time() - start_time) * 1000, 2) # ms


            # Base health info
            health_info = {
                "status": health_status,
@@ -76,14 +75,14 @@ def register_health_tools(mcp: FastMCP):
                    "process_uptime_seconds": time.time() - start_time # Rough estimate
                }
            }


            if not response_data:
                health_status = "unhealthy"
                issues.append("No response from Unraid API")
                health_info["status"] = health_status
                health_info["issues"] = issues
                return health_info


            # System info analysis
            info = response_data.get("info", {})
            if info:
@@ -98,7 +97,7 @@ def register_health_tools(mcp: FastMCP):
            else:
                health_status = "degraded"
                issues.append("Unable to retrieve system info")


            # Array health analysis
            array_info = response_data.get("array", {})
            if array_info:
@@ -113,7 +112,7 @@ def register_health_tools(mcp: FastMCP):
            else:
                health_status = "warning"
                issues.append("Unable to retrieve array status")


            # Notifications analysis
            notifications = response_data.get("notifications", {})
            if notifications and notifications.get("overview"):
@@ -121,32 +120,32 @@ def register_health_tools(mcp: FastMCP):
                alert_count = unread.get("alert", 0)
                warning_count = unread.get("warning", 0)
                total_unread = unread.get("total", 0)


                health_info["notifications"] = {
                    "unread_total": total_unread,
                    "unread_alerts": alert_count,
                    "unread_warnings": warning_count,
                    "has_critical_notifications": alert_count > 0
                }


                if alert_count > 0:
                    health_status = "warning"
                    issues.append(f"{alert_count} unread alert notification(s)")

            # Docker services analysis

            # Docker services analysis
            docker_info = response_data.get("docker", {})
            if docker_info and docker_info.get("containers"):
                containers = docker_info["containers"]
                running_containers = [c for c in containers if c.get("state") == "running"]
                stopped_containers = [c for c in containers if c.get("state") == "exited"]


                health_info["docker_services"] = {
                    "total_containers": len(containers),
                    "running_containers": len(running_containers),
                    "stopped_containers": len(stopped_containers),
                    "containers_healthy": len([c for c in containers if c.get("status", "").startswith("Up")])
                }


            # API performance assessment
            if api_latency > 5000: # > 5 seconds
                health_status = "warning"
@@ -154,20 +153,20 @@ def register_health_tools(mcp: FastMCP):
            elif api_latency > 10000: # > 10 seconds
                health_status = "degraded"
                issues.append(f"Very high API latency: {api_latency}ms")


            # Final status determination
            health_info["status"] = health_status
            if issues:
                health_info["issues"] = issues


            # Add performance metrics
            health_info["performance"] = {
                "api_response_time_ms": api_latency,
                "health_check_duration_ms": round((time.time() - start_time) * 1000, 2)
            }


            return health_info


        except Exception as e:
            logger.error(f"Health check failed: {e}")
            return {
@@ -184,4 +183,4 @@ def register_health_tools(mcp: FastMCP):
                }
            }

    logger.info("Health tools registered successfully")
    logger.info("Health tools registered successfully")

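One caveat worth noting in the latency check above: any value above 10000 ms is also above 5000 ms, so the `elif api_latency > 10000` branch is unreachable and "degraded" can never be set from latency alone. An illustrative reordering (not part of this commit; the warning message is a placeholder) that restores the intended escalation:

    # Check the larger threshold first so the stronger status can trigger.
    if api_latency > 10000:  # > 10 seconds
        health_status = "degraded"
        issues.append(f"Very high API latency: {api_latency}ms")
    elif api_latency > 5000:  # > 5 seconds
        health_status = "warning"
        issues.append(f"High API latency: {api_latency}ms")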
@@ -5,7 +5,7 @@ remotes, getting configuration forms, creating new remotes, and deleting remotes
for various cloud storage providers (S3, Google Drive, Dropbox, FTP, etc.).
"""

from typing import Any, Dict, List, Optional
from typing import Any

from fastmcp import FastMCP

@@ -14,15 +14,15 @@ from ..core.client import make_graphql_request
from ..core.exceptions import ToolError


def register_rclone_tools(mcp: FastMCP):
def register_rclone_tools(mcp: FastMCP) -> None:
    """Register all RClone tools with the FastMCP instance.


    Args:
        mcp: FastMCP instance to register tools with
    """


    @mcp.tool()
    async def list_rclone_remotes() -> List[Dict[str, Any]]:
    async def list_rclone_remotes() -> list[dict[str, Any]]:
        """Retrieves all configured RClone remotes with their configuration details."""
        try:
            query = """
@@ -37,25 +37,25 @@ def register_rclone_tools(mcp: FastMCP):
                }
            }
            """


            response_data = await make_graphql_request(query)


            if "rclone" in response_data and "remotes" in response_data["rclone"]:
                remotes = response_data["rclone"]["remotes"]
                logger.info(f"Retrieved {len(remotes)} RClone remotes")
                return remotes

                return list(remotes) if isinstance(remotes, list) else []

            return []


        except Exception as e:
            logger.error(f"Failed to list RClone remotes: {str(e)}")
            raise ToolError(f"Failed to list RClone remotes: {str(e)}")
            raise ToolError(f"Failed to list RClone remotes: {str(e)}") from e

    @mcp.tool()
    async def get_rclone_config_form(provider_type: Optional[str] = None) -> Dict[str, Any]:
    async def get_rclone_config_form(provider_type: str | None = None) -> dict[str, Any]:
        """
        Get RClone configuration form schema for setting up new remotes.


        Args:
            provider_type: Optional provider type to get specific form (e.g., 's3', 'drive', 'dropbox')
        """
@@ -71,29 +71,29 @@ def register_rclone_tools(mcp: FastMCP):
                }
            }
            """


            variables = {}
            if provider_type:
                variables["formOptions"] = {"providerType": provider_type}


            response_data = await make_graphql_request(query, variables)


            if "rclone" in response_data and "configForm" in response_data["rclone"]:
                form_data = response_data["rclone"]["configForm"]
                logger.info(f"Retrieved RClone config form for {provider_type or 'general'}")
                return form_data

                return dict(form_data) if isinstance(form_data, dict) else {}

            raise ToolError("No RClone config form data received")


        except Exception as e:
            logger.error(f"Failed to get RClone config form: {str(e)}")
            raise ToolError(f"Failed to get RClone config form: {str(e)}")
            raise ToolError(f"Failed to get RClone config form: {str(e)}") from e

    @mcp.tool()
    async def create_rclone_remote(name: str, provider_type: str, config_data: Dict[str, Any]) -> Dict[str, Any]:
    async def create_rclone_remote(name: str, provider_type: str, config_data: dict[str, Any]) -> dict[str, Any]:
        """
        Create a new RClone remote with the specified configuration.


        Args:
            name: Name for the new remote
            provider_type: Type of provider (e.g., 's3', 'drive', 'dropbox', 'ftp')
@@ -111,7 +111,7 @@ def register_rclone_tools(mcp: FastMCP):
                }
            }
            """


            variables = {
                "input": {
                    "name": name,
@@ -119,9 +119,9 @@ def register_rclone_tools(mcp: FastMCP):
                    "config": config_data
                }
            }


            response_data = await make_graphql_request(mutation, variables)


            if "rclone" in response_data and "createRCloneRemote" in response_data["rclone"]:
                remote_info = response_data["rclone"]["createRCloneRemote"]
                logger.info(f"Successfully created RClone remote: {name}")
@@ -130,18 +130,18 @@ def register_rclone_tools(mcp: FastMCP):
                    "message": f"RClone remote '{name}' created successfully",
                    "remote": remote_info
                }


            raise ToolError("Failed to create RClone remote")


        except Exception as e:
            logger.error(f"Failed to create RClone remote {name}: {str(e)}")
            raise ToolError(f"Failed to create RClone remote {name}: {str(e)}")
            raise ToolError(f"Failed to create RClone remote {name}: {str(e)}") from e

    @mcp.tool()
    async def delete_rclone_remote(name: str) -> Dict[str, Any]:
    async def delete_rclone_remote(name: str) -> dict[str, Any]:
        """
        Delete an existing RClone remote by name.


        Args:
            name: Name of the remote to delete
        """
@@ -153,26 +153,26 @@ def register_rclone_tools(mcp: FastMCP):
                }
            }
            """


            variables = {
                "input": {
                    "name": name
                }
            }


            response_data = await make_graphql_request(mutation, variables)


            if "rclone" in response_data and response_data["rclone"]["deleteRCloneRemote"]:
                logger.info(f"Successfully deleted RClone remote: {name}")
                return {
                    "success": True,
                    "message": f"RClone remote '{name}' deleted successfully"
                }


            raise ToolError(f"Failed to delete RClone remote '{name}'")


        except Exception as e:
            logger.error(f"Failed to delete RClone remote {name}: {str(e)}")
            raise ToolError(f"Failed to delete RClone remote {name}: {str(e)}")
            raise ToolError(f"Failed to delete RClone remote {name}: {str(e)}") from e

    logger.info("RClone tools registered successfully")
    logger.info("RClone tools registered successfully")

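The typing changes in this file and the ones below swap `typing.Dict/List/Optional` for built-in generics and `X | None` unions. As a sketch of what that requires: built-in generics in annotations need Python 3.9+, bare `X | None` needs 3.10+, and `from __future__ import annotations` defers evaluation so both spellings work on older interpreters:

    from __future__ import annotations  # annotations become strings, evaluated lazily

    def get_form(provider_type: str | None = None) -> dict[str, str]:
        # Equivalent to Optional[str] and Dict[str, str], minus the typing imports.
        return {"provider": provider_type or "general"}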
@@ -5,7 +5,7 @@ log files, physical disks with SMART data, and system storage operations
with custom timeout configurations for disk-intensive operations.
"""

from typing import Any, Dict, List, Optional
from typing import Any

import httpx
from fastmcp import FastMCP
@@ -15,15 +15,15 @@ from ..core.client import make_graphql_request
from ..core.exceptions import ToolError


def register_storage_tools(mcp: FastMCP):
def register_storage_tools(mcp: FastMCP) -> None:
    """Register all storage tools with the FastMCP instance.


    Args:
        mcp: FastMCP instance to register tools with
    """


    @mcp.tool()
    async def get_shares_info() -> List[Dict[str, Any]]:
    async def get_shares_info() -> list[dict[str, Any]]:
        """Retrieves information about user shares."""
        query = """
        query GetSharesInfo {
@@ -50,13 +50,14 @@ def register_storage_tools(mcp: FastMCP):
        try:
            logger.info("Executing get_shares_info tool")
            response_data = await make_graphql_request(query)
            return response_data.get("shares", [])
            shares = response_data.get("shares", [])
            return list(shares) if isinstance(shares, list) else []
        except Exception as e:
            logger.error(f"Error in get_shares_info: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve shares information: {str(e)}")
            raise ToolError(f"Failed to retrieve shares information: {str(e)}") from e

    @mcp.tool()
    async def get_notifications_overview() -> Dict[str, Any]:
    async def get_notifications_overview() -> dict[str, Any]:
        """Retrieves an overview of system notifications (unread and archive counts by severity)."""
        query = """
        query GetNotificationsOverview {
@@ -72,19 +73,20 @@ def register_storage_tools(mcp: FastMCP):
            logger.info("Executing get_notifications_overview tool")
            response_data = await make_graphql_request(query)
            if response_data.get("notifications"):
                return response_data["notifications"].get("overview", {})
                overview = response_data["notifications"].get("overview", {})
                return dict(overview) if isinstance(overview, dict) else {}
            return {}
        except Exception as e:
            logger.error(f"Error in get_notifications_overview: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve notifications overview: {str(e)}")
            raise ToolError(f"Failed to retrieve notifications overview: {str(e)}") from e

    @mcp.tool()
    async def list_notifications(
        type: str,
        offset: int,
        limit: int,
        importance: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        type: str,
        offset: int,
        limit: int,
        importance: str | None = None
    ) -> list[dict[str, Any]]:
        """Lists notifications with filtering. Type: UNREAD/ARCHIVE. Importance: INFO/WARNING/ALERT."""
        query = """
        query ListNotifications($filter: NotificationFilter!) {
@@ -114,19 +116,20 @@ def register_storage_tools(mcp: FastMCP):
        # Remove null importance from variables if not provided, as GraphQL might be strict
        if not importance:
            del variables["filter"]["importance"]


        try:
            logger.info(f"Executing list_notifications: type={type}, offset={offset}, limit={limit}, importance={importance}")
            response_data = await make_graphql_request(query, variables)
            if response_data.get("notifications"):
                return response_data["notifications"].get("list", [])
                notifications_list = response_data["notifications"].get("list", [])
                return list(notifications_list) if isinstance(notifications_list, list) else []
            return []
        except Exception as e:
            logger.error(f"Error in list_notifications: {e}", exc_info=True)
            raise ToolError(f"Failed to list notifications: {str(e)}")
            raise ToolError(f"Failed to list notifications: {str(e)}") from e

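The `del variables["filter"]["importance"]` step above works, but the same effect can be had by only adding the key when a value exists. A small alternative sketch (same behavior, not part of this commit; the field names follow the tool's parameters, since the exact NotificationFilter shape isn't shown in this hunk):

    filter_vars: dict[str, object] = {"type": type, "offset": offset, "limit": limit}
    if importance:
        # Strict GraphQL schemas may reject an explicit null for an optional
        # input field, so only send the key when it has a value.
        filter_vars["importance"] = importance
    variables = {"filter": filter_vars}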
    @mcp.tool()
    async def list_available_log_files() -> List[Dict[str, Any]]:
    async def list_available_log_files() -> list[dict[str, Any]]:
        """Lists all available log files that can be queried."""
        query = """
        query ListLogFiles {
@@ -141,13 +144,14 @@ def register_storage_tools(mcp: FastMCP):
        try:
            logger.info("Executing list_available_log_files tool")
            response_data = await make_graphql_request(query)
            return response_data.get("logFiles", [])
            log_files = response_data.get("logFiles", [])
            return list(log_files) if isinstance(log_files, list) else []
        except Exception as e:
            logger.error(f"Error in list_available_log_files: {e}", exc_info=True)
            raise ToolError(f"Failed to list available log files: {str(e)}")
            raise ToolError(f"Failed to list available log files: {str(e)}") from e

    @mcp.tool()
    async def get_logs(log_file_path: str, tail_lines: int = 100) -> Dict[str, Any]:
    async def get_logs(log_file_path: str, tail_lines: int = 100) -> dict[str, Any]:
        """Retrieves content from a specific log file, defaulting to the last 100 lines."""
        # The Unraid GraphQL API Query.logFile takes 'lines' and 'startLine'.
        # To implement 'tail_lines', we would ideally need to know the total lines first,
@@ -158,7 +162,7 @@ def register_storage_tools(mcp: FastMCP):
        # If not, this tool might need to be smarter or the API might not directly support 'tail' easily.
        # The SDL for LogFileContent implies it returns startLine, so it seems aware of ranges.

        # Let's try fetching with just 'lines' to see if it acts as a tail,
        # Let's try fetching with just 'lines' to see if it acts as a tail,
        # or if we need Query.logFiles first to get totalLines for calculation.
        # For robust tailing, one might need to fetch totalLines first, then calculate start_line for the tail.
        # Simplified: query for the last 'tail_lines'. If the API doesn't support tailing this way, we may need adjustment.
@@ -178,16 +182,17 @@ def register_storage_tools(mcp: FastMCP):
        try:
            logger.info(f"Executing get_logs for {log_file_path}, tail_lines={tail_lines}")
            response_data = await make_graphql_request(query, variables)
            return response_data.get("logFile", {})
            log_file = response_data.get("logFile", {})
            return dict(log_file) if isinstance(log_file, dict) else {}
        except Exception as e:
            logger.error(f"Error in get_logs for {log_file_path}: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve logs from {log_file_path}: {str(e)}")
            raise ToolError(f"Failed to retrieve logs from {log_file_path}: {str(e)}") from e

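The comments in get_logs sketch the more robust approach: fetch totalLines first, then compute the window. A minimal illustration of that start-line arithmetic (a sketch under the comments' assumptions; the helper is hypothetical and not verified against the schema):

    def tail_window(total_lines: int, tail_lines: int) -> tuple[int, int]:
        """Return (start_line, lines) selecting the last tail_lines of a file."""
        start_line = max(total_lines - tail_lines + 1, 1)  # clamp for short files
        return start_line, min(tail_lines, total_lines)

    # A 1000-line file tailed by 100 fetches lines 901..1000;
    # a 40-line file just fetches everything from line 1.
    assert tail_window(1000, 100) == (901, 100)
    assert tail_window(40, 100) == (1, 40)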
    @mcp.tool()
    async def list_physical_disks() -> List[Dict[str, Any]]:
    async def list_physical_disks() -> list[dict[str, Any]]:
        """Lists all physical disks recognized by the Unraid system."""
        # Querying an extremely minimal set of fields for diagnostics
        query = """
        query = """
        query ListPhysicalDisksMinimal {
            disks {
                id
@@ -199,15 +204,16 @@ def register_storage_tools(mcp: FastMCP):
        try:
            logger.info("Executing list_physical_disks tool with minimal query and increased timeout")
            # Increased read timeout for this potentially slow query
            long_timeout = httpx.Timeout(10.0, read=90.0, connect=5.0)
            long_timeout = httpx.Timeout(10.0, read=90.0, connect=5.0)
            response_data = await make_graphql_request(query, custom_timeout=long_timeout)
            return response_data.get("disks", [])
            disks = response_data.get("disks", [])
            return list(disks) if isinstance(disks, list) else []
        except Exception as e:
            logger.error(f"Error in list_physical_disks: {e}", exc_info=True)
            raise ToolError(f"Failed to list physical disks: {str(e)}")
            raise ToolError(f"Failed to list physical disks: {str(e)}") from e

    @mcp.tool()
    async def get_disk_details(disk_id: str) -> Dict[str, Any]:
    async def get_disk_details(disk_id: str) -> dict[str, Any]:
        """Retrieves detailed SMART information and partition data for a specific physical disk."""
        # Enhanced query with more comprehensive disk information
        query = """
@@ -227,19 +233,20 @@ def register_storage_tools(mcp: FastMCP):
            logger.info(f"Executing get_disk_details for disk: {disk_id}")
            response_data = await make_graphql_request(query, variables)
            raw_disk = response_data.get("disk", {})


            if not raw_disk:
                raise ToolError(f"Disk '{disk_id}' not found")


            # Process disk information for human-readable output
            def format_bytes(bytes_value):
                if bytes_value is None: return "N/A"
                bytes_value = int(bytes_value)
            def format_bytes(bytes_value: int | None) -> str:
                if bytes_value is None:
                    return "N/A"
                value = float(int(bytes_value))
                for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB']:
                    if bytes_value < 1024.0:
                        return f"{bytes_value:.2f} {unit}"
                    bytes_value /= 1024.0
                return f"{bytes_value:.2f} EB"
                    if value < 1024.0:
                        return f"{value:.2f} {unit}"
                    value /= 1024.0
                return f"{value:.2f} EB"

            summary = {
                'disk_id': raw_disk.get('id'),
@@ -256,15 +263,15 @@ def register_storage_tools(mcp: FastMCP):
                'partition_count': len(raw_disk.get('partitions', [])),
                'total_partition_size': format_bytes(sum(p.get('size', 0) for p in raw_disk.get('partitions', []) if p.get('size')))
            }


            return {
                'summary': summary,
                'partitions': raw_disk.get('partitions', []),
                'details': raw_disk
            }


        except Exception as e:
            logger.error(f"Error in get_disk_details for {disk_id}: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve disk details for {disk_id}: {str(e)}")
            raise ToolError(f"Failed to retrieve disk details for {disk_id}: {str(e)}") from e

    logger.info("Storage tools registered successfully")
    logger.info("Storage tools registered successfully")

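For reference, the rewritten format_bytes accumulates into a separate float instead of re-binding the int argument, which satisfies mypy without changing output. Expected behavior of the version in the hunk above (a quick sketch):

    assert format_bytes(None) == "N/A"
    assert format_bytes(512) == "512.00 B"
    assert format_bytes(1536) == "1.50 KB"
    assert format_bytes(5 * 1024**4) == "5.00 TB"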
@@ -5,7 +5,7 @@ array status with health analysis, network configuration, registration info,
and system variables.
"""

from typing import Any, Dict
from typing import Any

from fastmcp import FastMCP

@@ -15,7 +15,7 @@ from ..core.exceptions import ToolError


# Standalone functions for use by subscription resources
async def _get_system_info() -> Dict[str, Any]:
async def _get_system_info() -> dict[str, Any]:
    """Standalone function to get system info - used by subscriptions and tools."""
    query = """
    query GetSystemInfo {
@@ -44,20 +44,20 @@ async def _get_system_info() -> Dict[str, Any]:
            raise ToolError("No system info returned from Unraid API")

        # Process for human-readable output
        summary = {}
        summary: dict[str, Any] = {}
        if raw_info.get('os'):
            os_info = raw_info['os']
            summary['os'] = f"{os_info.get('distro', '')} {os_info.get('release', '')} ({os_info.get('platform', '')}, {os_info.get('arch', '')})"
            summary['hostname'] = os_info.get('hostname')
            summary['uptime'] = os_info.get('uptime')

            summary['uptime'] = os_info.get('uptime')

        if raw_info.get('cpu'):
            cpu_info = raw_info['cpu']
            summary['cpu'] = f"{cpu_info.get('manufacturer', '')} {cpu_info.get('brand', '')} ({cpu_info.get('cores')} cores, {cpu_info.get('threads')} threads)"


        if raw_info.get('memory') and raw_info['memory'].get('layout'):
            mem_layout = raw_info['memory']['layout']
            summary['memory_layout_details'] = [] # Renamed for clarity
            summary['memory_layout_details'] = [] # Renamed for clarity
            # The API is not returning 'size' for individual sticks in the layout, even if queried.
            # So, we cannot calculate total from layout currently.
            for stick in mem_layout:
@@ -74,10 +74,10 @@ async def _get_system_info() -> Dict[str, Any]:

    except Exception as e:
        logger.error(f"Error in get_system_info: {e}", exc_info=True)
        raise ToolError(f"Failed to retrieve system information: {str(e)}")
        raise ToolError(f"Failed to retrieve system information: {str(e)}") from e


async def _get_array_status() -> Dict[str, Any]:
async def _get_array_status() -> dict[str, Any]:
    """Standalone function to get array status - used by subscriptions and tools."""
    query = """
    query GetArrayStatus {
@@ -102,34 +102,38 @@ async def _get_array_status() -> Dict[str, Any]:
        if not raw_array_info:
            raise ToolError("No array information returned from Unraid API")

        summary = {}
        summary: dict[str, Any] = {}
        summary['state'] = raw_array_info.get('state')

        if raw_array_info.get('capacity') and raw_array_info['capacity'].get('kilobytes'):
            kb_cap = raw_array_info['capacity']['kilobytes']
            # Helper to format KB into TB/GB/MB
            def format_kb(k):
                if k is None: return "N/A"
            def format_kb(k: Any) -> str:
                if k is None:
                    return "N/A"
                k = int(k) # Values are strings in SDL for PrefixedID containing types like capacity
                if k >= 1024*1024*1024: return f"{k / (1024*1024*1024):.2f} TB"
                if k >= 1024*1024: return f"{k / (1024*1024):.2f} GB"
                if k >= 1024: return f"{k / 1024:.2f} MB"
                if k >= 1024*1024*1024:
                    return f"{k / (1024*1024*1024):.2f} TB"
                if k >= 1024*1024:
                    return f"{k / (1024*1024):.2f} GB"
                if k >= 1024:
                    return f"{k / 1024:.2f} MB"
                return f"{k} KB"

            summary['capacity_total'] = format_kb(kb_cap.get('total'))
            summary['capacity_used'] = format_kb(kb_cap.get('used'))
            summary['capacity_free'] = format_kb(kb_cap.get('free'))


        summary['num_parity_disks'] = len(raw_array_info.get('parities', []))
        summary['num_data_disks'] = len(raw_array_info.get('disks', []))
        summary['num_cache_pools'] = len(raw_array_info.get('caches', [])) # Note: caches are pools, not individual cache disks

        # Enhanced: Add disk health summary
        def analyze_disk_health(disks, disk_type):
        def analyze_disk_health(disks: list[dict[str, Any]], disk_type: str) -> dict[str, int]:
            """Analyze health status of disk arrays"""
            if not disks:
                return {}


            health_counts = {
                'healthy': 0,
                'failed': 0,
@@ -138,12 +142,12 @@ async def _get_array_status() -> Dict[str, Any]:
                'warning': 0,
                'unknown': 0
            }


            for disk in disks:
                status = disk.get('status', '').upper()
                warning = disk.get('warning')
                critical = disk.get('critical')


                if status == 'DISK_OK':
                    if warning or critical:
                        health_counts['warning'] += 1
@@ -157,7 +161,7 @@ async def _get_array_status() -> Dict[str, Any]:
                        health_counts['new'] += 1
                else:
                    health_counts['unknown'] += 1


            return health_counts

        # Analyze health for each disk type
@@ -168,12 +172,12 @@ async def _get_array_status() -> Dict[str, Any]:
            health_summary['data_health'] = analyze_disk_health(raw_array_info['disks'], 'data')
        if raw_array_info.get('caches'):
            health_summary['cache_health'] = analyze_disk_health(raw_array_info['caches'], 'cache')


        # Overall array health assessment
        total_failed = sum(h.get('failed', 0) for h in health_summary.values())
        total_missing = sum(h.get('missing', 0) for h in health_summary.values())
        total_warning = sum(h.get('warning', 0) for h in health_summary.values())


        if total_failed > 0:
            overall_health = "CRITICAL"
        elif total_missing > 0:
@@ -182,7 +186,7 @@ async def _get_array_status() -> Dict[str, Any]:
            overall_health = "WARNING"
        else:
            overall_health = "HEALTHY"


        summary['overall_health'] = overall_health
        summary['health_summary'] = health_summary

@@ -190,28 +194,28 @@ async def _get_array_status() -> Dict[str, Any]:

    except Exception as e:
        logger.error(f"Error in get_array_status: {e}", exc_info=True)
        raise ToolError(f"Failed to retrieve array status: {str(e)}")
        raise ToolError(f"Failed to retrieve array status: {str(e)}") from e


def register_system_tools(mcp: FastMCP):
def register_system_tools(mcp: FastMCP) -> None:
    """Register all system tools with the FastMCP instance.


    Args:
        mcp: FastMCP instance to register tools with
    """


    @mcp.tool()
    async def get_system_info() -> Dict[str, Any]:
    async def get_system_info() -> dict[str, Any]:
        """Retrieves comprehensive information about the Unraid system, OS, CPU, memory, and baseboard."""
        return await _get_system_info()

    @mcp.tool()
    async def get_array_status() -> Dict[str, Any]:
    async def get_array_status() -> dict[str, Any]:
        """Retrieves the current status of the Unraid storage array, including its state, capacity, and details of all disks."""
        return await _get_array_status()

    @mcp.tool()
    async def get_network_config() -> Dict[str, Any]:
    async def get_network_config() -> dict[str, Any]:
        """Retrieves network configuration details, including access URLs."""
        query = """
        query GetNetworkConfig {
@@ -224,13 +228,14 @@ def register_system_tools(mcp: FastMCP):
        try:
            logger.info("Executing get_network_config tool")
            response_data = await make_graphql_request(query)
            return response_data.get("network", {})
            network = response_data.get("network", {})
            return dict(network) if isinstance(network, dict) else {}
        except Exception as e:
            logger.error(f"Error in get_network_config: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve network configuration: {str(e)}")
            raise ToolError(f"Failed to retrieve network configuration: {str(e)}") from e

    @mcp.tool()
    async def get_registration_info() -> Dict[str, Any]:
    async def get_registration_info() -> dict[str, Any]:
        """Retrieves Unraid registration details."""
        query = """
        query GetRegistrationInfo {
@@ -247,13 +252,14 @@ def register_system_tools(mcp: FastMCP):
        try:
            logger.info("Executing get_registration_info tool")
            response_data = await make_graphql_request(query)
            return response_data.get("registration", {})
            registration = response_data.get("registration", {})
            return dict(registration) if isinstance(registration, dict) else {}
        except Exception as e:
            logger.error(f"Error in get_registration_info: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve registration information: {str(e)}")
            raise ToolError(f"Failed to retrieve registration information: {str(e)}") from e

    @mcp.tool()
    async def get_connect_settings() -> Dict[str, Any]:
    async def get_connect_settings() -> dict[str, Any]:
        """Retrieves settings related to Unraid Connect."""
        # Based on actual schema: settings.unified.values contains the JSON settings
        query = """
@@ -268,7 +274,7 @@ def register_system_tools(mcp: FastMCP):
        try:
            logger.info("Executing get_connect_settings tool")
            response_data = await make_graphql_request(query)


            # Navigate down to the unified settings values
            if response_data.get("settings") and response_data["settings"].get("unified"):
                values = response_data["settings"]["unified"].get("values", {})
@@ -280,15 +286,15 @@ def register_system_tools(mcp: FastMCP):
                    if 'connect' in key.lower() or key in ['accessType', 'forwardType', 'port']:
                        connect_settings[key] = value
                return connect_settings if connect_settings else values
                return values
                return dict(values) if isinstance(values, dict) else {}
            return {}
        except Exception as e:
            logger.error(f"Error in get_connect_settings: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve Unraid Connect settings: {str(e)}")
            raise ToolError(f"Failed to retrieve Unraid Connect settings: {str(e)}") from e

    @mcp.tool()
    async def get_unraid_variables() -> Dict[str, Any]:
        """Retrieves a selection of Unraid system variables and settings.
    async def get_unraid_variables() -> dict[str, Any]:
        """Retrieves a selection of Unraid system variables and settings.
        Note: Many variables are omitted due to API type issues (Int overflow/NaN).
        """
        # Querying a smaller, curated set of fields to avoid Int overflow and NaN issues
@@ -377,9 +383,10 @@ def register_system_tools(mcp: FastMCP):
        try:
            logger.info("Executing get_unraid_variables tool with a selective query")
            response_data = await make_graphql_request(query)
            return response_data.get("vars", {})
            vars_data = response_data.get("vars", {})
            return dict(vars_data) if isinstance(vars_data, dict) else {}
        except Exception as e:
            logger.error(f"Error in get_unraid_variables: {e}", exc_info=True)
            raise ToolError(f"Failed to retrieve Unraid variables: {str(e)}")
            raise ToolError(f"Failed to retrieve Unraid variables: {str(e)}") from e

    logger.info("System tools registered successfully")
    logger.info("System tools registered successfully")

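A note on the recurring `list(x) if isinstance(x, list) else []` and `dict(x) if isinstance(x, dict) else {}` casts introduced across these files: make_graphql_request hands back effectively untyped JSON, and the isinstance check plus shallow copy is what lets each tool honestly declare its `list[dict[str, Any]]` or `dict[str, Any]` return type. The idea, distilled (illustrative helper, not repo code):

    from typing import Any

    def as_list(value: Any) -> list[dict[str, Any]]:
        # Narrow an untyped payload to the declared type; anything unexpected
        # degrades to an empty list instead of leaking Any to callers.
        return list(value) if isinstance(value, list) else []

    assert as_list([{"name": "cache"}]) == [{"name": "cache"}]
    assert as_list("not-a-list") == []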
@@ -5,7 +5,7 @@ including listing VMs, VM operations (start/stop/pause/reboot/etc),
and detailed VM information retrieval.
"""

from typing import Any, Dict, List
from typing import Any

from fastmcp import FastMCP

@@ -14,17 +14,17 @@ from ..core.client import make_graphql_request
from ..core.exceptions import ToolError


def register_vm_tools(mcp: FastMCP):
def register_vm_tools(mcp: FastMCP) -> None:
    """Register all VM tools with the FastMCP instance.


    Args:
        mcp: FastMCP instance to register tools with
    """


    @mcp.tool()
    async def list_vms() -> List[Dict[str, Any]]:
    async def list_vms() -> list[dict[str, Any]]:
        """Lists all Virtual Machines (VMs) on the Unraid system and their current state.


        Returns:
            List of VM information dictionaries with UUID, name, and state
        """
@@ -48,7 +48,7 @@ def register_vm_tools(mcp: FastMCP):
            if response_data.get("vms") and response_data["vms"].get("domains"):
                vms = response_data["vms"]["domains"]
                logger.info(f"Found {len(vms)} VMs")
                return vms
                return list(vms) if isinstance(vms, list) else []
            else:
                logger.info("No VMs found in domains field")
                return []
@@ -56,18 +56,18 @@ def register_vm_tools(mcp: FastMCP):
            logger.error(f"Error in list_vms: {e}", exc_info=True)
            error_msg = str(e)
            if "VMs are not available" in error_msg:
                raise ToolError("VMs are not available on this Unraid server. This could mean: 1) VM support is not enabled, 2) VM service is not running, or 3) no VMs are configured. Check Unraid VM settings.")
                raise ToolError("VMs are not available on this Unraid server. This could mean: 1) VM support is not enabled, 2) VM service is not running, or 3) no VMs are configured. Check Unraid VM settings.") from e
            else:
                raise ToolError(f"Failed to list virtual machines: {error_msg}")
                raise ToolError(f"Failed to list virtual machines: {error_msg}") from e

    @mcp.tool()
    async def manage_vm(vm_uuid: str, action: str) -> Dict[str, Any]:
    async def manage_vm(vm_uuid: str, action: str) -> dict[str, Any]:
        """Manages a VM: start, stop, pause, resume, force_stop, reboot, reset. Uses VM UUID.


        Args:
            vm_uuid: UUID of the VM to manage
            action: Action to perform - one of: start, stop, pause, resume, forceStop, reboot, reset


        Returns:
            Dict containing operation success status and details
        """
@@ -95,15 +95,15 @@ def register_vm_tools(mcp: FastMCP):
            raise ToolError(f"Failed to {action} VM or unexpected response structure.")
        except Exception as e:
            logger.error(f"Error in manage_vm ({action}): {e}", exc_info=True)
            raise ToolError(f"Failed to {action} virtual machine: {str(e)}")
            raise ToolError(f"Failed to {action} virtual machine: {str(e)}") from e

    @mcp.tool()
    async def get_vm_details(vm_identifier: str) -> Dict[str, Any]:
    async def get_vm_details(vm_identifier: str) -> dict[str, Any]:
        """Retrieves detailed information for a specific VM by its UUID or name.


        Args:
            vm_identifier: VM UUID or name to retrieve details for


        Returns:
            Dict containing detailed VM information
        """
@@ -129,20 +129,20 @@ def register_vm_tools(mcp: FastMCP):
        try:
            logger.info(f"Executing get_vm_details for identifier: {vm_identifier}")
            response_data = await make_graphql_request(query)


            if response_data.get("vms"):
                vms_data = response_data["vms"]
                # Try to get VMs from either domains or domain field
                vms = vms_data.get("domains") or vms_data.get("domain") or []


                if vms:
                    for vm_data in vms:
                        if (vm_data.get("uuid") == vm_identifier or
                            vm_data.get("id") == vm_identifier or
                        if (vm_data.get("uuid") == vm_identifier or
                            vm_data.get("id") == vm_identifier or
                            vm_data.get("name") == vm_identifier):
                            logger.info(f"Found VM {vm_identifier}")
                            return vm_data

                            return dict(vm_data) if isinstance(vm_data, dict) else {}

                logger.warning(f"VM with identifier '{vm_identifier}' not found.")
                available_vms = [f"{vm.get('name')} (UUID: {vm.get('uuid')}, ID: {vm.get('id')})" for vm in vms]
                raise ToolError(f"VM '{vm_identifier}' not found. Available VMs: {', '.join(available_vms)}")
@@ -155,8 +155,8 @@ def register_vm_tools(mcp: FastMCP):
            logger.error(f"Error in get_vm_details: {e}", exc_info=True)
            error_msg = str(e)
            if "VMs are not available" in error_msg:
                raise ToolError("VMs are not available on this Unraid server. This could mean: 1) VM support is not enabled, 2) VM service is not running, or 3) no VMs are configured. Check Unraid VM settings.")
                raise ToolError("VMs are not available on this Unraid server. This could mean: 1) VM support is not enabled, 2) VM service is not running, or 3) no VMs are configured. Check Unraid VM settings.") from e
            else:
                raise ToolError(f"Failed to retrieve VM details: {error_msg}")
                raise ToolError(f"Failed to retrieve VM details: {error_msg}") from e

    logger.info("VM tools registered successfully")
    logger.info("VM tools registered successfully")
