From 316193c04bade07a0b9db736ea5c4675098233a1 Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Wed, 18 Feb 2026 01:02:13 -0500 Subject: [PATCH 01/34] refactor: comprehensive code review fixes across 31 files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses all critical, high, medium, and low issues from full codebase review. 494 tests pass, ruff clean, ty type-check clean. Security: - Add tool_error_handler context manager (exceptions.py) — standardised error handling, eliminates 11 bare except-reraise patterns - Remove unused exception subclasses (ConfigurationError, UnraidAPIError, SubscriptionError, ValidationError, IdempotentOperationError) - Harden GraphQL subscription query validator with allow-list and forbidden-keyword regex (diagnostics.py) - Add input validation for rclone create_remote config_data: injection, path-traversal, and key-count limits (rclone.py) - Validate notifications importance enum before GraphQL request (notifications.py) - Sanitise HTTP/network/JSON error messages — no raw exception strings leaked to clients (client.py) - Strip path/creds from displayed API URL via _safe_display_url (health.py) - Enable Ruff S (bandit) rule category in pyproject.toml - Harden container mutations to strict-only matching — no fuzzy/substring for destructive operations (docker.py) Performance: - Token-bucket rate limiter (90 tokens, 9 req/s) with 429 retry backoff (client.py) - Lazy asyncio.Lock init via _get_client_lock() — fixes event-loop module-load crash (client.py) - Double-checked locking in get_http_client() for fast-path (client.py) - Short hex container ID fast-path skips list fetch (docker.py) - Cap resource_data log content to 1 MB / 5,000 lines (manager.py) - Reset reconnect counter after 30 s stable connection (manager.py) - Move tail_lines validation to module level; enforce 10,000 line cap (storage.py, docker.py) - force_terminal=True removed from logging RichHandler (logging.py) Architecture: - Register diagnostic tools in server startup (server.py) - Move ALL_ACTIONS computation to module level in all tools - Consolidate format_kb / format_bytes into shared core/utils.py - Add _safe_get() helper in core/utils.py for nested dict traversal - Extract _analyze_subscription_status() from health.py diagnose handler - Validate required config at startup — fail fast with CRITICAL log (server.py) Code quality: - Remove ~90 lines of dead Rich formatting helpers from logging.py - Remove dead self.websocket attribute from SubscriptionManager - Remove dead setup_uvicorn_logging() wrapper - Move _VALID_IMPORTANCE to module level (N806 fix) - Add slots=True to all three dataclasses (SubscriptionData, SystemHealth, APIResponse) - Fix None rendering as literal "None" string in info.py summaries - Change fuzzy-match log messages from INFO to DEBUG (docker.py) - UTC-aware datetimes throughout (manager.py, diagnostics.py) Infrastructure: - Upgrade base image python:3.11-slim → python:3.12-slim (Dockerfile) - Add non-root appuser (UID/GID 1000) with HEALTHCHECK (Dockerfile) - Add read_only, cap_drop: ALL, tmpfs /tmp to docker-compose.yml - Single-source version via importlib.metadata (pyproject.toml → __init__.py) - Add open_timeout to all websockets.connect() calls Tests: - Update error message matchers to match sanitised messages (test_client.py) - Fix patch targets for UNRAID_API_URL → utils module (test_subscriptions.py) - Fix importance="info" → importance="normal" (test_notifications.py, http_layer) - Fix naive datetime fixtures → UTC-aware (test_subscriptions.py) Co-authored-by: Claude --- Dockerfile | 27 ++- docker-compose.yml | 15 +- pyproject.toml | 4 +- tests/http_layer/test_request_construction.py | 22 +- tests/integration/test_subscriptions.py | 42 ++-- tests/schema/test_query_validation.py | 8 +- tests/test_client.py | 12 +- tests/test_notifications.py | 2 +- tests/test_storage.py | 2 +- unraid_mcp/__init__.py | 8 +- unraid_mcp/config/logging.py | 178 ++++---------- unraid_mcp/config/settings.py | 13 +- unraid_mcp/core/client.py | 220 +++++++++++++++--- unraid_mcp/core/exceptions.py | 54 ++--- unraid_mcp/core/types.py | 20 +- unraid_mcp/core/utils.py | 68 ++++++ unraid_mcp/server.py | 24 +- unraid_mcp/subscriptions/diagnostics.py | 98 ++++++-- unraid_mcp/subscriptions/manager.py | 165 ++++++++----- unraid_mcp/subscriptions/resources.py | 2 +- unraid_mcp/subscriptions/utils.py | 29 ++- unraid_mcp/tools/array.py | 10 +- unraid_mcp/tools/docker.py | 140 ++++++----- unraid_mcp/tools/health.py | 105 ++++++--- unraid_mcp/tools/info.py | 73 +++--- unraid_mcp/tools/keys.py | 16 +- unraid_mcp/tools/notifications.py | 38 +-- unraid_mcp/tools/rclone.py | 59 ++++- unraid_mcp/tools/storage.py | 56 ++--- unraid_mcp/tools/users.py | 12 +- unraid_mcp/tools/virtualization.py | 82 ++++--- uv.lock | 13 -- 32 files changed, 995 insertions(+), 622 deletions(-) create mode 100644 unraid_mcp/core/utils.py diff --git a/Dockerfile b/Dockerfile index 9a97595..bf7baa4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Use an official Python runtime as a parent image -FROM python:3.11-slim +FROM python:3.12-slim # Set the working directory in the container WORKDIR /app @@ -7,13 +7,22 @@ WORKDIR /app # Install uv COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/ -# Copy dependency files -COPY pyproject.toml . -COPY uv.lock . -COPY README.md . +# Create non-root user with home directory and give ownership of /app +RUN groupadd --gid 1000 appuser && \ + useradd --uid 1000 --gid 1000 --create-home --shell /bin/false appuser && \ + chown appuser:appuser /app + +# Copy dependency files (owned by appuser via --chown) +COPY --chown=appuser:appuser pyproject.toml . +COPY --chown=appuser:appuser uv.lock . +COPY --chown=appuser:appuser README.md . +COPY --chown=appuser:appuser LICENSE . # Copy the source code -COPY unraid_mcp/ ./unraid_mcp/ +COPY --chown=appuser:appuser unraid_mcp/ ./unraid_mcp/ + +# Switch to non-root user before installing dependencies +USER appuser # Install dependencies and the package RUN uv sync --frozen @@ -31,5 +40,9 @@ ENV UNRAID_API_KEY="" ENV UNRAID_VERIFY_SSL="true" ENV UNRAID_MCP_LOG_LEVEL="INFO" -# Run unraid-mcp-server.py when the container launches +# Health check +HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ + CMD ["python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:6970/mcp')"] + +# Run unraid-mcp-server when the container launches CMD ["uv", "run", "unraid-mcp-server"] diff --git a/docker-compose.yml b/docker-compose.yml index 5544c21..7639bcb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,6 +5,11 @@ services: dockerfile: Dockerfile container_name: unraid-mcp restart: unless-stopped + read_only: true + cap_drop: + - ALL + tmpfs: + - /tmp:noexec,nosuid,size=64m ports: # HostPort:ContainerPort (maps to UNRAID_MCP_PORT inside the container, default 6970) # Change the host port (left side) if 6970 is already in use on your host @@ -13,23 +18,23 @@ services: # Core API Configuration (Required) - UNRAID_API_URL=${UNRAID_API_URL} - UNRAID_API_KEY=${UNRAID_API_KEY} - + # MCP Server Settings - UNRAID_MCP_PORT=${UNRAID_MCP_PORT:-6970} - UNRAID_MCP_HOST=${UNRAID_MCP_HOST:-0.0.0.0} - UNRAID_MCP_TRANSPORT=${UNRAID_MCP_TRANSPORT:-streamable-http} - + # SSL Configuration - UNRAID_VERIFY_SSL=${UNRAID_VERIFY_SSL:-true} - + # Logging Configuration - UNRAID_MCP_LOG_LEVEL=${UNRAID_MCP_LOG_LEVEL:-INFO} - UNRAID_MCP_LOG_FILE=${UNRAID_MCP_LOG_FILE:-unraid-mcp.log} - + # Real-time Subscription Configuration - UNRAID_AUTO_START_SUBSCRIPTIONS=${UNRAID_AUTO_START_SUBSCRIPTIONS:-true} - UNRAID_MAX_RECONNECT_ATTEMPTS=${UNRAID_MAX_RECONNECT_ATTEMPTS:-10} - + # Optional: Custom log file path for subscription auto-start diagnostics - UNRAID_AUTOSTART_LOG_PATH=${UNRAID_AUTOSTART_LOG_PATH} # Optional: If you want to mount a specific directory for logs (ensure UNRAID_MCP_LOG_FILE points within this mount) diff --git a/pyproject.toml b/pyproject.toml index 1de7e8a..0515555 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,7 +77,6 @@ dependencies = [ "uvicorn[standard]>=0.35.0", "websockets>=15.0.1", "rich>=14.1.0", - "pytz>=2025.2", ] # ============================================================================ @@ -170,6 +169,8 @@ select = [ "PERF", # Ruff-specific rules "RUF", + # flake8-bandit (security) + "S", ] ignore = [ "E501", # line too long (handled by ruff formatter) @@ -285,7 +286,6 @@ dev = [ "pytest-asyncio>=1.2.0", "pytest-cov>=7.0.0", "respx>=0.22.0", - "types-pytz>=2025.2.0.20250809", "ty>=0.0.15", "ruff>=0.12.8", "build>=1.2.2", diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index a93dbaf..8ac7ad1 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -158,43 +158,43 @@ class TestHttpErrorHandling: @respx.mock async def test_http_401_raises_tool_error(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(401, text="Unauthorized")) - with pytest.raises(ToolError, match="HTTP error 401"): + with pytest.raises(ToolError, match="Unraid API returned HTTP 401"): await make_graphql_request("query { online }") @respx.mock async def test_http_403_raises_tool_error(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(403, text="Forbidden")) - with pytest.raises(ToolError, match="HTTP error 403"): + with pytest.raises(ToolError, match="Unraid API returned HTTP 403"): await make_graphql_request("query { online }") @respx.mock async def test_http_500_raises_tool_error(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(500, text="Internal Server Error")) - with pytest.raises(ToolError, match="HTTP error 500"): + with pytest.raises(ToolError, match="Unraid API returned HTTP 500"): await make_graphql_request("query { online }") @respx.mock async def test_http_503_raises_tool_error(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(503, text="Service Unavailable")) - with pytest.raises(ToolError, match="HTTP error 503"): + with pytest.raises(ToolError, match="Unraid API returned HTTP 503"): await make_graphql_request("query { online }") @respx.mock async def test_network_connection_error(self) -> None: respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused")) - with pytest.raises(ToolError, match="Network connection error"): + with pytest.raises(ToolError, match="Network error connecting to Unraid API"): await make_graphql_request("query { online }") @respx.mock async def test_network_timeout_error(self) -> None: respx.post(API_URL).mock(side_effect=httpx.ReadTimeout("Read timed out")) - with pytest.raises(ToolError, match="Network connection error"): + with pytest.raises(ToolError, match="Network error connecting to Unraid API"): await make_graphql_request("query { online }") @respx.mock async def test_invalid_json_response(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(200, text="not json")) - with pytest.raises(ToolError, match="Invalid JSON response"): + with pytest.raises(ToolError, match="invalid response"): await make_graphql_request("query { online }") @@ -868,14 +868,14 @@ class TestNotificationsToolRequests: title="Test", subject="Sub", description="Desc", - importance="info", + importance="normal", ) body = _extract_request_body(route.calls.last.request) assert "CreateNotification" in body["query"] inp = body["variables"]["input"] assert inp["title"] == "Test" assert inp["subject"] == "Sub" - assert inp["importance"] == "INFO" # uppercased + assert inp["importance"] == "NORMAL" # uppercased from "normal" @respx.mock async def test_archive_sends_id_variable(self) -> None: @@ -1256,7 +1256,7 @@ class TestCrossCuttingConcerns: tool = make_tool_fn( "unraid_mcp.tools.info", "register_info_tool", "unraid_info" ) - with pytest.raises(ToolError, match="HTTP error 500"): + with pytest.raises(ToolError, match="Unraid API returned HTTP 500"): await tool(action="online") @respx.mock @@ -1268,7 +1268,7 @@ class TestCrossCuttingConcerns: tool = make_tool_fn( "unraid_mcp.tools.info", "register_info_tool", "unraid_info" ) - with pytest.raises(ToolError, match="Network connection error"): + with pytest.raises(ToolError, match="Network error connecting to Unraid API"): await tool(action="online") @respx.mock diff --git a/tests/integration/test_subscriptions.py b/tests/integration/test_subscriptions.py index 5d3d384..22e3954 100644 --- a/tests/integration/test_subscriptions.py +++ b/tests/integration/test_subscriptions.py @@ -7,7 +7,7 @@ data management without requiring a live Unraid server. import asyncio import json -from datetime import datetime +from datetime import UTC, datetime from typing import Any from unittest.mock import AsyncMock, MagicMock, patch @@ -83,7 +83,7 @@ SAMPLE_QUERY = "subscription { test { value } }" # Shared patch targets _WS_CONNECT = "unraid_mcp.subscriptions.manager.websockets.connect" -_API_URL = "unraid_mcp.subscriptions.manager.UNRAID_API_URL" +_API_URL = "unraid_mcp.subscriptions.utils.UNRAID_API_URL" _API_KEY = "unraid_mcp.subscriptions.manager.UNRAID_API_KEY" _SSL_CTX = "unraid_mcp.subscriptions.manager.build_ws_ssl_context" _SLEEP = "unraid_mcp.subscriptions.manager.asyncio.sleep" @@ -100,7 +100,7 @@ class TestSubscriptionManagerInit: mgr = SubscriptionManager() assert mgr.active_subscriptions == {} assert mgr.resource_data == {} - assert mgr.websocket is None + assert not hasattr(mgr, "websocket") def test_default_auto_start_enabled(self) -> None: mgr = SubscriptionManager() @@ -720,20 +720,20 @@ class TestWebSocketURLConstruction: class TestResourceData: - def test_get_resource_data_returns_none_when_empty(self) -> None: + async def test_get_resource_data_returns_none_when_empty(self) -> None: mgr = SubscriptionManager() - assert mgr.get_resource_data("nonexistent") is None + assert await mgr.get_resource_data("nonexistent") is None - def test_get_resource_data_returns_stored_data(self) -> None: + async def test_get_resource_data_returns_stored_data(self) -> None: from unraid_mcp.core.types import SubscriptionData mgr = SubscriptionManager() mgr.resource_data["test"] = SubscriptionData( data={"key": "value"}, - last_updated=datetime.now(), + last_updated=datetime.now(UTC), subscription_type="test", ) - result = mgr.get_resource_data("test") + result = await mgr.get_resource_data("test") assert result == {"key": "value"} def test_list_active_subscriptions_empty(self) -> None: @@ -755,46 +755,46 @@ class TestResourceData: class TestSubscriptionStatus: - def test_status_includes_all_configured_subscriptions(self) -> None: + async def test_status_includes_all_configured_subscriptions(self) -> None: mgr = SubscriptionManager() - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() for name in mgr.subscription_configs: assert name in status - def test_status_default_connection_state(self) -> None: + async def test_status_default_connection_state(self) -> None: mgr = SubscriptionManager() - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() for sub_status in status.values(): assert sub_status["runtime"]["connection_state"] == "not_started" - def test_status_shows_active_flag(self) -> None: + async def test_status_shows_active_flag(self) -> None: mgr = SubscriptionManager() mgr.active_subscriptions["logFileSubscription"] = MagicMock() - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() assert status["logFileSubscription"]["runtime"]["active"] is True - def test_status_shows_data_availability(self) -> None: + async def test_status_shows_data_availability(self) -> None: from unraid_mcp.core.types import SubscriptionData mgr = SubscriptionManager() mgr.resource_data["logFileSubscription"] = SubscriptionData( data={"log": "content"}, - last_updated=datetime.now(), + last_updated=datetime.now(UTC), subscription_type="logFileSubscription", ) - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() assert status["logFileSubscription"]["data"]["available"] is True - def test_status_shows_error_info(self) -> None: + async def test_status_shows_error_info(self) -> None: mgr = SubscriptionManager() mgr.last_error["logFileSubscription"] = "Test error message" - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() assert status["logFileSubscription"]["runtime"]["last_error"] == "Test error message" - def test_status_reconnect_attempts_tracked(self) -> None: + async def test_status_reconnect_attempts_tracked(self) -> None: mgr = SubscriptionManager() mgr.reconnect_attempts["logFileSubscription"] = 3 - status = mgr.get_subscription_status() + status = await mgr.get_subscription_status() assert status["logFileSubscription"]["runtime"]["reconnect_attempts"] == 3 diff --git a/tests/schema/test_query_validation.py b/tests/schema/test_query_validation.py index 59eb765..c72aad6 100644 --- a/tests/schema/test_query_validation.py +++ b/tests/schema/test_query_validation.py @@ -384,10 +384,16 @@ class TestVmQueries: errors = _validate_operation(schema, QUERIES["list"]) assert not errors, f"list query validation failed: {errors}" + def test_details_query(self, schema: GraphQLSchema) -> None: + from unraid_mcp.tools.virtualization import QUERIES + + errors = _validate_operation(schema, QUERIES["details"]) + assert not errors, f"details query validation failed: {errors}" + def test_all_vm_queries_covered(self, schema: GraphQLSchema) -> None: from unraid_mcp.tools.virtualization import QUERIES - assert set(QUERIES.keys()) == {"list"} + assert set(QUERIES.keys()) == {"list", "details"} class TestVmMutations: diff --git a/tests/test_client.py b/tests/test_client.py index b144b75..9208d76 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -274,7 +274,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="HTTP error 401"), + pytest.raises(ToolError, match="Unraid API returned HTTP 401"), ): await make_graphql_request("{ info }") @@ -292,7 +292,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="HTTP error 500"), + pytest.raises(ToolError, match="Unraid API returned HTTP 500"), ): await make_graphql_request("{ info }") @@ -310,7 +310,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="HTTP error 503"), + pytest.raises(ToolError, match="Unraid API returned HTTP 503"), ): await make_graphql_request("{ info }") @@ -320,7 +320,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="Network connection error"), + pytest.raises(ToolError, match="Network error connecting to Unraid API"), ): await make_graphql_request("{ info }") @@ -330,7 +330,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="Network connection error"), + pytest.raises(ToolError, match="Network error connecting to Unraid API"), ): await make_graphql_request("{ info }") @@ -344,7 +344,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="Invalid JSON response"), + pytest.raises(ToolError, match="invalid response.*not valid JSON"), ): await make_graphql_request("{ info }") diff --git a/tests/test_notifications.py b/tests/test_notifications.py index 0ad9dc3..af07977 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -92,7 +92,7 @@ class TestNotificationsActions: title="Test", subject="Test Subject", description="Test Desc", - importance="info", + importance="normal", ) assert result["success"] is True diff --git a/tests/test_storage.py b/tests/test_storage.py index 9cd7867..77d5ea9 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -7,7 +7,7 @@ import pytest from conftest import make_tool_fn from unraid_mcp.core.exceptions import ToolError -from unraid_mcp.tools.storage import format_bytes +from unraid_mcp.core.utils import format_bytes # --- Unit tests for helpers --- diff --git a/unraid_mcp/__init__.py b/unraid_mcp/__init__.py index 1b08ab3..b6d6c59 100644 --- a/unraid_mcp/__init__.py +++ b/unraid_mcp/__init__.py @@ -4,4 +4,10 @@ A modular MCP (Model Context Protocol) server that provides tools to interact with an Unraid server's GraphQL API. """ -__version__ = "0.2.0" +from importlib.metadata import PackageNotFoundError, version + + +try: + __version__ = version("unraid-mcp") +except PackageNotFoundError: + __version__ = "0.0.0" diff --git a/unraid_mcp/config/logging.py b/unraid_mcp/config/logging.py index c6ed490..0df21c6 100644 --- a/unraid_mcp/config/logging.py +++ b/unraid_mcp/config/logging.py @@ -5,16 +5,10 @@ that cap at 10MB and start over (no rotation) for consistent use across all modu """ import logging -from datetime import datetime from pathlib import Path -import pytz -from rich.align import Align from rich.console import Console from rich.logging import RichHandler -from rich.panel import Panel -from rich.rule import Rule -from rich.text import Text try: @@ -28,7 +22,7 @@ from .settings import LOG_FILE_PATH, LOG_LEVEL_STR # Global Rich console for consistent formatting -console = Console(stderr=True, force_terminal=True) +console = Console(stderr=True) class OverwriteFileHandler(logging.FileHandler): @@ -45,12 +39,18 @@ class OverwriteFileHandler(logging.FileHandler): delay: Whether to delay file opening """ self.max_bytes = max_bytes + self._emit_count = 0 + self._check_interval = 100 super().__init__(filename, mode, encoding, delay) def emit(self, record): - """Emit a record, checking file size and overwriting if needed.""" - # Check file size before writing - if self.stream and hasattr(self.stream, "name"): + """Emit a record, checking file size periodically and overwriting if needed.""" + self._emit_count += 1 + if ( + self._emit_count % self._check_interval == 0 + and self.stream + and hasattr(self.stream, "name") + ): try: base_path = Path(self.baseFilename) if base_path.exists(): @@ -91,6 +91,28 @@ class OverwriteFileHandler(logging.FileHandler): super().emit(record) +def _create_shared_file_handler() -> OverwriteFileHandler: + """Create the single shared file handler for all loggers. + + Returns: + Configured OverwriteFileHandler instance + """ + numeric_log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO) + handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8") + handler.setLevel(numeric_log_level) + handler.setFormatter( + logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s" + ) + ) + return handler + + +# Single shared file handler — all loggers reuse this instance to avoid +# race conditions from multiple OverwriteFileHandler instances on the same file. +_shared_file_handler = _create_shared_file_handler() + + def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger: """Set up and configure the logger with console and file handlers. @@ -118,19 +140,13 @@ def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger: show_level=True, show_path=False, rich_tracebacks=True, - tracebacks_show_locals=True, + tracebacks_show_locals=False, ) console_handler.setLevel(numeric_log_level) logger.addHandler(console_handler) - # File Handler with 10MB cap (overwrites instead of rotating) - file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8") - file_handler.setLevel(numeric_log_level) - file_formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s" - ) - file_handler.setFormatter(file_formatter) - logger.addHandler(file_handler) + # Reuse the shared file handler + logger.addHandler(_shared_file_handler) return logger @@ -157,20 +173,14 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None: show_level=True, show_path=False, rich_tracebacks=True, - tracebacks_show_locals=True, + tracebacks_show_locals=False, markup=True, ) console_handler.setLevel(numeric_log_level) fastmcp_logger.addHandler(console_handler) - # File Handler with 10MB cap (overwrites instead of rotating) - file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8") - file_handler.setLevel(numeric_log_level) - file_formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s" - ) - file_handler.setFormatter(file_formatter) - fastmcp_logger.addHandler(file_handler) + # Reuse the shared file handler + fastmcp_logger.addHandler(_shared_file_handler) fastmcp_logger.setLevel(numeric_log_level) @@ -186,30 +196,19 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None: show_level=True, show_path=False, rich_tracebacks=True, - tracebacks_show_locals=True, + tracebacks_show_locals=False, markup=True, ) root_console_handler.setLevel(numeric_log_level) root_logger.addHandler(root_console_handler) - # File Handler for root logger with 10MB cap (overwrites instead of rotating) - root_file_handler = OverwriteFileHandler( - LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8" - ) - root_file_handler.setLevel(numeric_log_level) - root_file_handler.setFormatter(file_formatter) - root_logger.addHandler(root_file_handler) + # Reuse the shared file handler for root logger + root_logger.addHandler(_shared_file_handler) root_logger.setLevel(numeric_log_level) return fastmcp_logger -def setup_uvicorn_logging() -> logging.Logger | None: - """Configure uvicorn and other third-party loggers to use Rich formatting.""" - # This function is kept for backward compatibility but now delegates to FastMCP - return configure_fastmcp_logger_with_rich() - - def log_configuration_status(logger: logging.Logger) -> None: """Log configuration status at startup. @@ -242,97 +241,6 @@ def log_configuration_status(logger: logging.Logger) -> None: logger.error(f"Missing required configuration: {config['missing_config']}") -# Development logging helpers for Rich formatting -def get_est_timestamp() -> str: - """Get current timestamp in EST timezone with YY/MM/DD format.""" - est = pytz.timezone("US/Eastern") - now = datetime.now(est) - return now.strftime("%y/%m/%d %H:%M:%S") - - -def log_header(title: str) -> None: - """Print a beautiful header panel with Nordic blue styling.""" - panel = Panel( - Align.center(Text(title, style="bold white")), - style="#5E81AC", # Nordic blue - padding=(0, 2), - border_style="#81A1C1", # Light Nordic blue - ) - console.print(panel) - - -def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0) -> None: - """Log a message with specific level and indentation.""" - timestamp = get_est_timestamp() - indent_str = " " * indent - - # Enhanced Nordic color scheme with more blues - level_config = { - "error": {"color": "#BF616A", "icon": "❌", "style": "bold"}, # Nordic red - "warning": {"color": "#EBCB8B", "icon": "⚠️", "style": ""}, # Nordic yellow - "success": {"color": "#A3BE8C", "icon": "✅", "style": "bold"}, # Nordic green - "info": {"color": "#5E81AC", "icon": "\u2139\ufe0f", "style": "bold"}, # Nordic blue (bold) - "status": {"color": "#81A1C1", "icon": "🔍", "style": ""}, # Light Nordic blue - "debug": {"color": "#4C566A", "icon": "🐛", "style": ""}, # Nordic dark gray - } - - config = level_config.get( - level, {"color": "#81A1C1", "icon": "•", "style": ""} - ) # Default to light Nordic blue - - # Create beautifully formatted text - text = Text() - - # Timestamp with Nordic blue styling - text.append(f"[{timestamp}]", style="#81A1C1") # Light Nordic blue for timestamps - text.append(" ") - - # Indentation with Nordic blue styling - if indent > 0: - text.append(indent_str, style="#81A1C1") - - # Level icon (only for certain levels) - if level in ["error", "warning", "success"]: - # Extract emoji from message if it starts with one, to avoid duplication - if message and len(message) > 0 and ord(message[0]) >= 0x1F600: # Emoji range - # Message already has emoji, don't add icon - pass - else: - text.append(f"{config['icon']} ", style=config["color"]) - - # Message content - message_style = f"{config['color']} {config['style']}".strip() - text.append(message, style=message_style) - - console.print(text) - - -def log_separator() -> None: - """Print a beautiful separator line with Nordic blue styling.""" - console.print(Rule(style="#81A1C1")) - - -# Convenience functions for different log levels -def log_error(message: str, indent: int = 0) -> None: - log_with_level_and_indent(message, "error", indent) - - -def log_warning(message: str, indent: int = 0) -> None: - log_with_level_and_indent(message, "warning", indent) - - -def log_success(message: str, indent: int = 0) -> None: - log_with_level_and_indent(message, "success", indent) - - -def log_info(message: str, indent: int = 0) -> None: - log_with_level_and_indent(message, "info", indent) - - -def log_status(message: str, indent: int = 0) -> None: - log_with_level_and_indent(message, "status", indent) - - # Global logger instance - modules can import this directly if FASTMCP_AVAILABLE: # Use FastMCP logger with Rich formatting @@ -341,5 +249,5 @@ if FASTMCP_AVAILABLE: else: # Fallback to our custom logger if FastMCP is not available logger = setup_logger() - # Setup uvicorn logging when module is imported - setup_uvicorn_logging() + # Also configure FastMCP logger for consistency + configure_fastmcp_logger_with_rich() diff --git a/unraid_mcp/config/settings.py b/unraid_mcp/config/settings.py index e2cd869..cdea8b6 100644 --- a/unraid_mcp/config/settings.py +++ b/unraid_mcp/config/settings.py @@ -5,6 +5,7 @@ and provides all configuration constants used throughout the application. """ import os +from importlib.metadata import PackageNotFoundError, version from pathlib import Path from typing import Any @@ -30,8 +31,11 @@ for dotenv_path in dotenv_paths: load_dotenv(dotenv_path=dotenv_path) break -# Application Version -VERSION = "0.2.0" +# Application Version (single source of truth: pyproject.toml) +try: + VERSION = version("unraid-mcp") +except PackageNotFoundError: + VERSION = "0.0.0" # Core API Configuration UNRAID_API_URL = os.getenv("UNRAID_API_URL") @@ -39,7 +43,7 @@ UNRAID_API_KEY = os.getenv("UNRAID_API_KEY") # Server Configuration UNRAID_MCP_PORT = int(os.getenv("UNRAID_MCP_PORT", "6970")) -UNRAID_MCP_HOST = os.getenv("UNRAID_MCP_HOST", "0.0.0.0") +UNRAID_MCP_HOST = os.getenv("UNRAID_MCP_HOST", "0.0.0.0") # noqa: S104 — intentional for Docker UNRAID_MCP_TRANSPORT = os.getenv("UNRAID_MCP_TRANSPORT", "streamable-http").lower() # SSL Configuration @@ -54,7 +58,8 @@ else: # Path to CA bundle # Logging Configuration LOG_LEVEL_STR = os.getenv("UNRAID_MCP_LOG_LEVEL", "INFO").upper() LOG_FILE_NAME = os.getenv("UNRAID_MCP_LOG_FILE", "unraid-mcp.log") -LOGS_DIR = Path("/tmp") +# Use /app/logs in Docker, project-relative logs/ directory otherwise +LOGS_DIR = Path("/app/logs") if Path("/app").is_dir() else PROJECT_ROOT / "logs" LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME # Ensure logs directory exists diff --git a/unraid_mcp/core/client.py b/unraid_mcp/core/client.py index b3f511d..9c6369b 100644 --- a/unraid_mcp/core/client.py +++ b/unraid_mcp/core/client.py @@ -5,7 +5,9 @@ to the Unraid API with proper timeout handling and error management. """ import asyncio +import hashlib import json +import time from typing import Any import httpx @@ -22,7 +24,19 @@ from ..core.exceptions import ToolError # Sensitive keys to redact from debug logs -_SENSITIVE_KEYS = {"password", "key", "secret", "token", "apikey"} +_SENSITIVE_KEYS = { + "password", + "key", + "secret", + "token", + "apikey", + "authorization", + "cookie", + "session", + "credential", + "passphrase", + "jwt", +} def _is_sensitive_key(key: str) -> bool: @@ -67,7 +81,121 @@ def get_timeout_for_operation(profile: str) -> httpx.Timeout: # Global connection pool (module-level singleton) _http_client: httpx.AsyncClient | None = None -_client_lock = asyncio.Lock() +_client_lock: asyncio.Lock | None = None + + +def _get_client_lock() -> asyncio.Lock: + """Get or create the client lock (lazy init to avoid event loop issues).""" + global _client_lock + if _client_lock is None: + _client_lock = asyncio.Lock() + return _client_lock + + +class _RateLimiter: + """Token bucket rate limiter for Unraid API (100 req / 10s hard limit). + + Uses 90 tokens with 9.0 tokens/sec refill for 10% safety headroom. + """ + + def __init__(self, max_tokens: int = 90, refill_rate: float = 9.0) -> None: + self.max_tokens = max_tokens + self.tokens = float(max_tokens) + self.refill_rate = refill_rate # tokens per second + self.last_refill = time.monotonic() + self._lock: asyncio.Lock | None = None + + def _get_lock(self) -> asyncio.Lock: + if self._lock is None: + self._lock = asyncio.Lock() + return self._lock + + def _refill(self) -> None: + """Refill tokens based on elapsed time.""" + now = time.monotonic() + elapsed = now - self.last_refill + self.tokens = min(self.max_tokens, self.tokens + elapsed * self.refill_rate) + self.last_refill = now + + async def acquire(self) -> None: + """Consume one token, waiting if necessary for refill.""" + while True: + async with self._get_lock(): + self._refill() + if self.tokens >= 1: + self.tokens -= 1 + return + wait_time = (1 - self.tokens) / self.refill_rate + + # Sleep outside the lock so other coroutines aren't blocked + await asyncio.sleep(wait_time) + + +_rate_limiter = _RateLimiter() + + +# --- TTL Cache for stable read-only queries --- + +# Queries whose results change infrequently and are safe to cache. +# Mutations and volatile queries (metrics, docker, array state) are excluded. +_CACHEABLE_QUERY_PREFIXES = frozenset( + { + "GetNetworkConfig", + "GetRegistrationInfo", + "GetOwner", + "GetFlash", + } +) + +_CACHE_TTL_SECONDS = 60.0 + + +class _QueryCache: + """Simple TTL cache for GraphQL query responses. + + Keyed by a hash of (query, variables). Entries expire after _CACHE_TTL_SECONDS. + Only caches responses for queries whose operation name is in _CACHEABLE_QUERY_PREFIXES. + Mutation requests always bypass the cache. + """ + + def __init__(self) -> None: + self._store: dict[str, tuple[float, dict[str, Any]]] = {} + + @staticmethod + def _cache_key(query: str, variables: dict[str, Any] | None) -> str: + raw = query + json.dumps(variables or {}, sort_keys=True) + return hashlib.sha256(raw.encode()).hexdigest() + + @staticmethod + def is_cacheable(query: str) -> bool: + """Check if a query is eligible for caching based on its operation name.""" + if query.lstrip().startswith("mutation"): + return False + return any(prefix in query for prefix in _CACHEABLE_QUERY_PREFIXES) + + def get(self, query: str, variables: dict[str, Any] | None) -> dict[str, Any] | None: + """Return cached result if present and not expired, else None.""" + key = self._cache_key(query, variables) + entry = self._store.get(key) + if entry is None: + return None + expires_at, data = entry + if time.monotonic() > expires_at: + del self._store[key] + return None + return data + + def put(self, query: str, variables: dict[str, Any] | None, data: dict[str, Any]) -> None: + """Store a query result with TTL expiry.""" + key = self._cache_key(query, variables) + self._store[key] = (time.monotonic() + _CACHE_TTL_SECONDS, data) + + def invalidate_all(self) -> None: + """Clear the entire cache (called after mutations).""" + self._store.clear() + + +_query_cache = _QueryCache() def is_idempotent_error(error_message: str, operation: str) -> bool: @@ -109,7 +237,7 @@ async def _create_http_client() -> httpx.AsyncClient: return httpx.AsyncClient( # Connection pool settings limits=httpx.Limits( - max_keepalive_connections=20, max_connections=100, keepalive_expiry=30.0 + max_keepalive_connections=20, max_connections=20, keepalive_expiry=30.0 ), # Default timeout (can be overridden per-request) timeout=DEFAULT_TIMEOUT, @@ -123,40 +251,35 @@ async def _create_http_client() -> httpx.AsyncClient: async def get_http_client() -> httpx.AsyncClient: """Get or create shared HTTP client with connection pooling. - The client is protected by an asyncio lock to prevent concurrent creation. - If the existing client was closed (e.g., during shutdown), a new one is created. + Uses double-checked locking: fast-path skips the lock when the client + is already initialized, only acquiring it for initial creation or + recovery after close. Returns: Singleton AsyncClient instance with connection pooling enabled """ global _http_client - async with _client_lock: + # Fast-path: skip lock if client is already initialized and open + client = _http_client + if client is not None and not client.is_closed: + return client + + # Slow-path: acquire lock for initialization + async with _get_client_lock(): if _http_client is None or _http_client.is_closed: _http_client = await _create_http_client() logger.info( - "Created shared HTTP client with connection pooling (20 keepalive, 100 max connections)" + "Created shared HTTP client with connection pooling (20 keepalive, 20 max connections)" ) - - client = _http_client - - # Verify client is still open after releasing the lock. - # In asyncio's cooperative model this is unlikely to fail, but guards - # against edge cases where close_http_client runs between yield points. - if client.is_closed: - async with _client_lock: - _http_client = await _create_http_client() - client = _http_client - logger.info("Re-created HTTP client after unexpected close") - - return client + return _http_client async def close_http_client() -> None: """Close the shared HTTP client (call on server shutdown).""" global _http_client - async with _client_lock: + async with _get_client_lock(): if _http_client is not None: await _http_client.aclose() _http_client = None @@ -190,6 +313,14 @@ async def make_graphql_request( if not UNRAID_API_KEY: raise ToolError("UNRAID_API_KEY not configured") + # Check TTL cache for stable read-only queries + is_mutation = query.lstrip().startswith("mutation") + if not is_mutation and _query_cache.is_cacheable(query): + cached = _query_cache.get(query, variables) + if cached is not None: + logger.debug("Returning cached response for query") + return cached + headers = { "Content-Type": "application/json", "X-API-Key": UNRAID_API_KEY, @@ -205,17 +336,31 @@ async def make_graphql_request( logger.debug(f"Variables: {_redact_sensitive(variables)}") try: + # Rate limit: consume a token before making the request + await _rate_limiter.acquire() + # Get the shared HTTP client with connection pooling client = await get_http_client() - # Override timeout if custom timeout specified + # Retry loop for 429 rate limit responses + post_kwargs: dict[str, Any] = {"json": payload, "headers": headers} if custom_timeout is not None: - response = await client.post( - UNRAID_API_URL, json=payload, headers=headers, timeout=custom_timeout - ) - else: - response = await client.post(UNRAID_API_URL, json=payload, headers=headers) + post_kwargs["timeout"] = custom_timeout + response: httpx.Response | None = None + for attempt in range(3): + response = await client.post(UNRAID_API_URL, **post_kwargs) + if response.status_code == 429: + backoff = 2**attempt + logger.warning( + f"Rate limited (429) by Unraid API, retrying in {backoff}s (attempt {attempt + 1}/3)" + ) + await asyncio.sleep(backoff) + continue + break + + if response is None: # pragma: no cover — guaranteed by loop + raise ToolError("No response received after retry attempts") response.raise_for_status() # Raise an exception for HTTP error codes 4xx/5xx response_data = response.json() @@ -245,14 +390,27 @@ async def make_graphql_request( logger.debug("GraphQL request successful.") data = response_data.get("data", {}) - return data if isinstance(data, dict) else {} # Ensure we return dict + result = data if isinstance(data, dict) else {} # Ensure we return dict + + # Invalidate cache on mutations; cache eligible query results + if is_mutation: + _query_cache.invalidate_all() + elif _query_cache.is_cacheable(query): + _query_cache.put(query, variables, result) + + return result except httpx.HTTPStatusError as e: + # Log full details internally; only expose status code to MCP client logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}") - raise ToolError(f"HTTP error {e.response.status_code}: {e.response.text}") from e + raise ToolError( + f"Unraid API returned HTTP {e.response.status_code}. Check server logs for details." + ) from e except httpx.RequestError as e: + # Log full error internally; give safe summary to MCP client logger.error(f"Request error occurred: {e}") - raise ToolError(f"Network connection error: {e!s}") from e + raise ToolError(f"Network error connecting to Unraid API: {type(e).__name__}") from e except json.JSONDecodeError as e: + # Log full decode error; give safe summary to MCP client logger.error(f"Failed to decode JSON response: {e}") - raise ToolError(f"Invalid JSON response from Unraid API: {e!s}") from e + raise ToolError("Unraid API returned an invalid response (not valid JSON)") from e diff --git a/unraid_mcp/core/exceptions.py b/unraid_mcp/core/exceptions.py index 2731387..c5b99cf 100644 --- a/unraid_mcp/core/exceptions.py +++ b/unraid_mcp/core/exceptions.py @@ -4,6 +4,10 @@ This module defines custom exception classes for consistent error handling throughout the application, with proper integration to FastMCP's error system. """ +import contextlib +import logging +from collections.abc import Generator + from fastmcp.exceptions import ToolError as FastMCPToolError @@ -19,36 +23,26 @@ class ToolError(FastMCPToolError): pass -class ConfigurationError(ToolError): - """Raised when there are configuration-related errors.""" +@contextlib.contextmanager +def tool_error_handler( + tool_name: str, + action: str, + logger: logging.Logger, +) -> Generator[None]: + """Context manager that standardizes tool error handling. - pass + Re-raises ToolError as-is. Catches all other exceptions, logs them + with full traceback, and wraps them in ToolError with a descriptive message. - -class UnraidAPIError(ToolError): - """Raised when the Unraid API returns an error or is unreachable.""" - - pass - - -class SubscriptionError(ToolError): - """Raised when there are WebSocket subscription-related errors.""" - - pass - - -class ValidationError(ToolError): - """Raised when input validation fails.""" - - pass - - -class IdempotentOperationError(ToolError): - """Raised when an operation is idempotent (already in desired state). - - This is used internally to signal that an operation was already complete, - which should typically be converted to a success response rather than - propagated as an error to the user. + Args: + tool_name: The tool name for error messages (e.g., "docker", "vm"). + action: The current action being executed. + logger: The logger instance to use for error logging. """ - - pass + try: + yield + except ToolError: + raise + except Exception as e: + logger.error(f"Error in unraid_{tool_name} action={action}: {e}", exc_info=True) + raise ToolError(f"Failed to execute {tool_name}/{action}: {e!s}") from e diff --git a/unraid_mcp/core/types.py b/unraid_mcp/core/types.py index b48a4df..9b7ec8a 100644 --- a/unraid_mcp/core/types.py +++ b/unraid_mcp/core/types.py @@ -9,27 +9,33 @@ from datetime import datetime from typing import Any -@dataclass +@dataclass(slots=True) class SubscriptionData: - """Container for subscription data with metadata.""" + """Container for subscription data with metadata. + + Note: last_updated must be timezone-aware (use datetime.now(UTC)). + """ data: dict[str, Any] - last_updated: datetime + last_updated: datetime # Must be timezone-aware (UTC) subscription_type: str -@dataclass +@dataclass(slots=True) class SystemHealth: - """Container for system health status information.""" + """Container for system health status information. + + Note: last_checked must be timezone-aware (use datetime.now(UTC)). + """ is_healthy: bool issues: list[str] warnings: list[str] - last_checked: datetime + last_checked: datetime # Must be timezone-aware (UTC) component_status: dict[str, str] -@dataclass +@dataclass(slots=True) class APIResponse: """Container for standardized API response data.""" diff --git a/unraid_mcp/core/utils.py b/unraid_mcp/core/utils.py new file mode 100644 index 0000000..1db6dc4 --- /dev/null +++ b/unraid_mcp/core/utils.py @@ -0,0 +1,68 @@ +"""Shared utility functions for Unraid MCP tools.""" + +from typing import Any + + +def safe_get(data: dict[str, Any], *keys: str, default: Any = None) -> Any: + """Safely traverse nested dict keys, handling None intermediates. + + Args: + data: The root dictionary to traverse. + *keys: Sequence of keys to follow. + default: Value to return if any key is missing or None. + + Returns: + The value at the end of the key chain, or default if unreachable. + """ + current = data + for key in keys: + if not isinstance(current, dict): + return default + current = current.get(key) + return current if current is not None else default + + +def format_bytes(bytes_value: int | None) -> str: + """Format byte values into human-readable sizes. + + Args: + bytes_value: Number of bytes, or None. + + Returns: + Human-readable string like "1.00 GB" or "N/A" if input is None/invalid. + """ + if bytes_value is None: + return "N/A" + try: + value = float(int(bytes_value)) + except (ValueError, TypeError): + return "N/A" + for unit in ["B", "KB", "MB", "GB", "TB", "PB"]: + if value < 1024.0: + return f"{value:.2f} {unit}" + value /= 1024.0 + return f"{value:.2f} EB" + + +def format_kb(k: Any) -> str: + """Format kilobyte values into human-readable sizes. + + Args: + k: Number of kilobytes, or None. + + Returns: + Human-readable string like "1.00 GB" or "N/A" if input is None/invalid. + """ + if k is None: + return "N/A" + try: + k = int(k) + except (ValueError, TypeError): + return "N/A" + if k >= 1024 * 1024 * 1024: + return f"{k / (1024 * 1024 * 1024):.2f} TB" + if k >= 1024 * 1024: + return f"{k / (1024 * 1024):.2f} GB" + if k >= 1024: + return f"{k / 1024:.2f} MB" + return f"{k} KB" diff --git a/unraid_mcp/server.py b/unraid_mcp/server.py index be711da..91794af 100644 --- a/unraid_mcp/server.py +++ b/unraid_mcp/server.py @@ -15,8 +15,11 @@ from .config.settings import ( UNRAID_MCP_HOST, UNRAID_MCP_PORT, UNRAID_MCP_TRANSPORT, + UNRAID_VERIFY_SSL, VERSION, + validate_required_config, ) +from .subscriptions.diagnostics import register_diagnostic_tools from .subscriptions.resources import register_subscription_resources from .tools.array import register_array_tool from .tools.docker import register_docker_tool @@ -44,9 +47,10 @@ mcp = FastMCP( def register_all_modules() -> None: """Register all tools and resources with the MCP instance.""" try: - # Register subscription resources first + # Register subscription resources and diagnostic tools register_subscription_resources(mcp) - logger.info("Subscription resources registered") + register_diagnostic_tools(mcp) + logger.info("Subscription resources and diagnostic tools registered") # Register all consolidated tools registrars = [ @@ -73,6 +77,15 @@ def register_all_modules() -> None: def run_server() -> None: """Run the MCP server with the configured transport.""" + # Validate required configuration before anything else + is_valid, missing = validate_required_config() + if not is_valid: + logger.critical( + f"Missing required configuration: {', '.join(missing)}. " + "Set these environment variables or add them to your .env file." + ) + sys.exit(1) + # Log configuration if UNRAID_API_URL: logger.info(f"UNRAID_API_URL loaded: {UNRAID_API_URL[:20]}...") @@ -88,6 +101,13 @@ def run_server() -> None: logger.info(f"UNRAID_MCP_HOST set to: {UNRAID_MCP_HOST}") logger.info(f"UNRAID_MCP_TRANSPORT set to: {UNRAID_MCP_TRANSPORT}") + if UNRAID_VERIFY_SSL is False: + logger.warning( + "SSL VERIFICATION DISABLED (UNRAID_VERIFY_SSL=false). " + "Connections to Unraid API are vulnerable to man-in-the-middle attacks. " + "Only use this in trusted networks or for development." + ) + # Register all modules register_all_modules() diff --git a/unraid_mcp/subscriptions/diagnostics.py b/unraid_mcp/subscriptions/diagnostics.py index ea77e69..88da6e8 100644 --- a/unraid_mcp/subscriptions/diagnostics.py +++ b/unraid_mcp/subscriptions/diagnostics.py @@ -6,8 +6,10 @@ development and debugging purposes. """ import asyncio +import contextlib import json -from datetime import datetime +import re +from datetime import UTC, datetime from typing import Any import websockets @@ -19,7 +21,58 @@ from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL from ..core.exceptions import ToolError from .manager import subscription_manager from .resources import ensure_subscriptions_started -from .utils import build_ws_ssl_context +from .utils import build_ws_ssl_context, build_ws_url + + +_ALLOWED_SUBSCRIPTION_NAMES = frozenset( + { + "logFileSubscription", + "containerStatsSubscription", + "cpuSubscription", + "memorySubscription", + "arraySubscription", + "networkSubscription", + "dockerSubscription", + "vmSubscription", + } +) + +# Pattern: must start with "subscription", contain only a known subscription name, +# and not contain mutation/query keywords or semicolons (prevents injection). +_SUBSCRIPTION_NAME_PATTERN = re.compile(r"^\s*subscription\b[^{]*\{\s*(\w+)", re.IGNORECASE) +_FORBIDDEN_KEYWORDS = re.compile(r"\b(mutation|query)\b", re.IGNORECASE) + + +def _validate_subscription_query(query: str) -> str: + """Validate that a subscription query is safe to execute. + + Only allows subscription operations targeting whitelisted subscription names. + Rejects any query containing mutation/query keywords. + + Returns: + The extracted subscription name. + + Raises: + ToolError: If the query fails validation. + """ + if _FORBIDDEN_KEYWORDS.search(query): + raise ToolError("Query rejected: must be a subscription, not a mutation or query.") + + match = _SUBSCRIPTION_NAME_PATTERN.match(query) + if not match: + raise ToolError( + "Query rejected: must start with 'subscription' and contain a valid " + "subscription operation. Example: subscription { logFileSubscription { ... } }" + ) + + sub_name = match.group(1) + if sub_name not in _ALLOWED_SUBSCRIPTION_NAMES: + raise ToolError( + f"Subscription '{sub_name}' is not allowed. " + f"Allowed subscriptions: {sorted(_ALLOWED_SUBSCRIPTION_NAMES)}" + ) + + return sub_name def register_diagnostic_tools(mcp: FastMCP) -> None: @@ -34,6 +87,10 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: """Test a GraphQL subscription query directly to debug schema issues. Use this to find working subscription field names and structure. + Only whitelisted subscriptions are allowed (logFileSubscription, + containerStatsSubscription, cpuSubscription, memorySubscription, + arraySubscription, networkSubscription, dockerSubscription, + vmSubscription). Args: subscription_query: The GraphQL subscription query to test @@ -41,16 +98,16 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: Returns: Dict containing test results and response data """ - try: - logger.info(f"[TEST_SUBSCRIPTION] Testing query: {subscription_query}") + # Validate before any network I/O + sub_name = _validate_subscription_query(subscription_query) - # Build WebSocket URL - if not UNRAID_API_URL: - raise ToolError("UNRAID_API_URL is not configured") - ws_url = ( - UNRAID_API_URL.replace("https://", "wss://").replace("http://", "ws://") - + "/graphql" - ) + try: + logger.info(f"[TEST_SUBSCRIPTION] Testing validated subscription '{sub_name}'") + + try: + ws_url = build_ws_url() + except ValueError as e: + raise ToolError(str(e)) from e ssl_context = build_ws_ssl_context(ws_url) @@ -59,6 +116,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: ws_url, subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")], ssl=ssl_context, + open_timeout=10, ping_interval=30, ping_timeout=10, ) as websocket: @@ -122,14 +180,14 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: logger.info("[DIAGNOSTIC] Running subscription diagnostics...") # Get comprehensive status - status = subscription_manager.get_subscription_status() + status = await subscription_manager.get_subscription_status() # Initialize connection issues list with proper type connection_issues: list[dict[str, Any]] = [] # Add environment info with explicit typing diagnostic_info: dict[str, Any] = { - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(UTC).isoformat(), "environment": { "auto_start_enabled": subscription_manager.auto_start_enabled, "max_reconnect_attempts": subscription_manager.max_reconnect_attempts, @@ -152,17 +210,9 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: }, } - # Calculate WebSocket URL - if UNRAID_API_URL: - if UNRAID_API_URL.startswith("https://"): - ws_url = "wss://" + UNRAID_API_URL[len("https://") :] - elif UNRAID_API_URL.startswith("http://"): - ws_url = "ws://" + UNRAID_API_URL[len("http://") :] - else: - ws_url = UNRAID_API_URL - if not ws_url.endswith("/graphql"): - ws_url = ws_url.rstrip("/") + "/graphql" - diagnostic_info["environment"]["websocket_url"] = ws_url + # Calculate WebSocket URL (stays None if UNRAID_API_URL not configured) + with contextlib.suppress(ValueError): + diagnostic_info["environment"]["websocket_url"] = build_ws_url() # Analyze issues for sub_name, sub_status in status.items(): diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index c98be94..75b948d 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -8,16 +8,50 @@ error handling, reconnection logic, and authentication. import asyncio import json import os -from datetime import datetime +import time +from datetime import UTC, datetime from typing import Any import websockets from websockets.typing import Subprotocol from ..config.logging import logger -from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL +from ..config.settings import UNRAID_API_KEY +from ..core.client import _redact_sensitive from ..core.types import SubscriptionData -from .utils import build_ws_ssl_context +from .utils import build_ws_ssl_context, build_ws_url + + +# Resource data size limits to prevent unbounded memory growth +_MAX_RESOURCE_DATA_BYTES = 1_048_576 # 1MB +_MAX_RESOURCE_DATA_LINES = 5_000 +# Minimum stable connection duration (seconds) before resetting reconnect counter +_STABLE_CONNECTION_SECONDS = 30 + + +def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: + """Cap log content in subscription data to prevent unbounded memory growth. + + If the data contains a 'content' field (from log subscriptions) that exceeds + size limits, truncate to the most recent _MAX_RESOURCE_DATA_LINES lines. + """ + for key, value in data.items(): + if isinstance(value, dict): + data[key] = _cap_log_content(value) + elif ( + key == "content" + and isinstance(value, str) + and len(value.encode("utf-8", errors="replace")) > _MAX_RESOURCE_DATA_BYTES + ): + lines = value.splitlines() + if len(lines) > _MAX_RESOURCE_DATA_LINES: + truncated = "\n".join(lines[-_MAX_RESOURCE_DATA_LINES:]) + logger.warning( + f"[RESOURCE] Capped log content from {len(lines)} to " + f"{_MAX_RESOURCE_DATA_LINES} lines ({len(value)} -> {len(truncated)} chars)" + ) + data[key] = truncated + return data class SubscriptionManager: @@ -26,7 +60,6 @@ class SubscriptionManager: def __init__(self) -> None: self.active_subscriptions: dict[str, asyncio.Task[None]] = {} self.resource_data: dict[str, SubscriptionData] = {} - self.websocket: websockets.WebSocketServerProtocol | None = None self.subscription_lock = asyncio.Lock() # Configuration @@ -37,6 +70,7 @@ class SubscriptionManager: self.max_reconnect_attempts = int(os.getenv("UNRAID_MAX_RECONNECT_ATTEMPTS", "10")) self.connection_states: dict[str, str] = {} # Track connection state per subscription self.last_error: dict[str, str] = {} # Track last error per subscription + self._connection_start_times: dict[str, float] = {} # Track when connections started # Define subscription configurations self.subscription_configs = { @@ -165,20 +199,7 @@ class SubscriptionManager: break try: - # Build WebSocket URL with detailed logging - if not UNRAID_API_URL: - raise ValueError("UNRAID_API_URL is not configured") - - if UNRAID_API_URL.startswith("https://"): - ws_url = "wss://" + UNRAID_API_URL[len("https://") :] - elif UNRAID_API_URL.startswith("http://"): - ws_url = "ws://" + UNRAID_API_URL[len("http://") :] - else: - ws_url = UNRAID_API_URL - - if not ws_url.endswith("/graphql"): - ws_url = ws_url.rstrip("/") + "/graphql" - + ws_url = build_ws_url() logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}") logger.debug( f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}" @@ -195,6 +216,7 @@ class SubscriptionManager: async with websockets.connect( ws_url, subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")], + open_timeout=connect_timeout, ping_interval=20, ping_timeout=10, close_timeout=10, @@ -206,9 +228,9 @@ class SubscriptionManager: ) self.connection_states[subscription_name] = "connected" - # Reset retry count on successful connection - self.reconnect_attempts[subscription_name] = 0 - retry_delay = 5 # Reset delay + # Track connection start time — only reset retry counter + # after the connection proves stable (>30s connected) + self._connection_start_times[subscription_name] = time.monotonic() # Initialize GraphQL-WS protocol logger.debug( @@ -290,7 +312,9 @@ class SubscriptionManager: f"[SUBSCRIPTION:{subscription_name}] Subscription message type: {start_type}" ) logger.debug(f"[SUBSCRIPTION:{subscription_name}] Query: {query[:100]}...") - logger.debug(f"[SUBSCRIPTION:{subscription_name}] Variables: {variables}") + logger.debug( + f"[SUBSCRIPTION:{subscription_name}] Variables: {_redact_sensitive(variables)}" + ) await websocket.send(json.dumps(subscription_message)) logger.info( @@ -326,9 +350,14 @@ class SubscriptionManager: logger.info( f"[DATA:{subscription_name}] Received subscription data update" ) + capped_data = ( + _cap_log_content(payload["data"]) + if isinstance(payload["data"], dict) + else payload["data"] + ) self.resource_data[subscription_name] = SubscriptionData( - data=payload["data"], - last_updated=datetime.now(), + data=capped_data, + last_updated=datetime.now(UTC), subscription_type=subscription_name, ) logger.debug( @@ -427,6 +456,26 @@ class SubscriptionManager: self.last_error[subscription_name] = error_msg self.connection_states[subscription_name] = "error" + # Check if connection was stable before deciding on retry behavior + start_time = self._connection_start_times.get(subscription_name) + if start_time is not None: + connected_duration = time.monotonic() - start_time + if connected_duration >= _STABLE_CONNECTION_SECONDS: + # Connection was stable — reset retry counter and backoff + logger.info( + f"[WEBSOCKET:{subscription_name}] Connection was stable " + f"({connected_duration:.0f}s >= {_STABLE_CONNECTION_SECONDS}s), " + f"resetting retry counter" + ) + self.reconnect_attempts[subscription_name] = 0 + retry_delay = 5 + else: + logger.warning( + f"[WEBSOCKET:{subscription_name}] Connection was unstable " + f"({connected_duration:.0f}s < {_STABLE_CONNECTION_SECONDS}s), " + f"keeping retry counter at {self.reconnect_attempts.get(subscription_name, 0)}" + ) + # Calculate backoff delay retry_delay = min(retry_delay * 1.5, max_retry_delay) logger.info( @@ -435,15 +484,16 @@ class SubscriptionManager: self.connection_states[subscription_name] = "reconnecting" await asyncio.sleep(retry_delay) - def get_resource_data(self, resource_name: str) -> dict[str, Any] | None: + async def get_resource_data(self, resource_name: str) -> dict[str, Any] | None: """Get current resource data with enhanced logging.""" logger.debug(f"[RESOURCE:{resource_name}] Resource data requested") - if resource_name in self.resource_data: - data = self.resource_data[resource_name] - age_seconds = (datetime.now() - data.last_updated).total_seconds() - logger.debug(f"[RESOURCE:{resource_name}] Data found, age: {age_seconds:.1f}s") - return data.data + async with self.subscription_lock: + if resource_name in self.resource_data: + data = self.resource_data[resource_name] + age_seconds = (datetime.now(UTC) - data.last_updated).total_seconds() + logger.debug(f"[RESOURCE:{resource_name}] Data found, age: {age_seconds:.1f}s") + return data.data logger.debug(f"[RESOURCE:{resource_name}] No data available") return None @@ -453,38 +503,39 @@ class SubscriptionManager: logger.debug(f"[SUBSCRIPTION_MANAGER] Active subscriptions: {active}") return active - def get_subscription_status(self) -> dict[str, dict[str, Any]]: + async def get_subscription_status(self) -> dict[str, dict[str, Any]]: """Get detailed status of all subscriptions for diagnostics.""" status = {} - for sub_name, config in self.subscription_configs.items(): - sub_status = { - "config": { - "resource": config["resource"], - "description": config["description"], - "auto_start": config.get("auto_start", False), - }, - "runtime": { - "active": sub_name in self.active_subscriptions, - "connection_state": self.connection_states.get(sub_name, "not_started"), - "reconnect_attempts": self.reconnect_attempts.get(sub_name, 0), - "last_error": self.last_error.get(sub_name, None), - }, - } - - # Add data info if available - if sub_name in self.resource_data: - data_info = self.resource_data[sub_name] - age_seconds = (datetime.now() - data_info.last_updated).total_seconds() - sub_status["data"] = { - "available": True, - "last_updated": data_info.last_updated.isoformat(), - "age_seconds": age_seconds, + async with self.subscription_lock: + for sub_name, config in self.subscription_configs.items(): + sub_status = { + "config": { + "resource": config["resource"], + "description": config["description"], + "auto_start": config.get("auto_start", False), + }, + "runtime": { + "active": sub_name in self.active_subscriptions, + "connection_state": self.connection_states.get(sub_name, "not_started"), + "reconnect_attempts": self.reconnect_attempts.get(sub_name, 0), + "last_error": self.last_error.get(sub_name, None), + }, } - else: - sub_status["data"] = {"available": False} - status[sub_name] = sub_status + # Add data info if available + if sub_name in self.resource_data: + data_info = self.resource_data[sub_name] + age_seconds = (datetime.now(UTC) - data_info.last_updated).total_seconds() + sub_status["data"] = { + "available": True, + "last_updated": data_info.last_updated.isoformat(), + "age_seconds": age_seconds, + } + else: + sub_status["data"] = {"available": False} + + status[sub_name] = sub_status logger.debug(f"[SUBSCRIPTION_MANAGER] Generated status for {len(status)} subscriptions") return status diff --git a/unraid_mcp/subscriptions/resources.py b/unraid_mcp/subscriptions/resources.py index f1b4caf..f80a708 100644 --- a/unraid_mcp/subscriptions/resources.py +++ b/unraid_mcp/subscriptions/resources.py @@ -82,7 +82,7 @@ def register_subscription_resources(mcp: FastMCP) -> None: async def logs_stream_resource() -> str: """Real-time log stream data from subscription.""" await ensure_subscriptions_started() - data = subscription_manager.get_resource_data("logFileSubscription") + data = await subscription_manager.get_resource_data("logFileSubscription") if data: return json.dumps(data, indent=2) return json.dumps( diff --git a/unraid_mcp/subscriptions/utils.py b/unraid_mcp/subscriptions/utils.py index 63674a3..45c3634 100644 --- a/unraid_mcp/subscriptions/utils.py +++ b/unraid_mcp/subscriptions/utils.py @@ -2,7 +2,34 @@ import ssl as _ssl -from ..config.settings import UNRAID_VERIFY_SSL +from ..config.settings import UNRAID_API_URL, UNRAID_VERIFY_SSL + + +def build_ws_url() -> str: + """Build a WebSocket URL from the configured UNRAID_API_URL. + + Converts http(s) scheme to ws(s) and ensures /graphql path suffix. + + Returns: + The WebSocket URL string (e.g. "wss://10.1.0.2:31337/graphql"). + + Raises: + ValueError: If UNRAID_API_URL is not configured. + """ + if not UNRAID_API_URL: + raise ValueError("UNRAID_API_URL is not configured") + + if UNRAID_API_URL.startswith("https://"): + ws_url = "wss://" + UNRAID_API_URL[len("https://") :] + elif UNRAID_API_URL.startswith("http://"): + ws_url = "ws://" + UNRAID_API_URL[len("http://") :] + else: + ws_url = UNRAID_API_URL + + if not ws_url.endswith("/graphql"): + ws_url = ws_url.rstrip("/") + "/graphql" + + return ws_url def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None: diff --git a/unraid_mcp/tools/array.py b/unraid_mcp/tools/array.py index 5cf132f..0afe755 100644 --- a/unraid_mcp/tools/array.py +++ b/unraid_mcp/tools/array.py @@ -9,7 +9,7 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -74,7 +74,7 @@ def register_array_tool(mcp: FastMCP) -> None: if action not in ALL_ACTIONS: raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") - try: + with tool_error_handler("array", action, logger): logger.info(f"Executing unraid_array action={action}") if action in QUERIES: @@ -95,10 +95,4 @@ def register_array_tool(mcp: FastMCP) -> None: "data": data, } - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_array action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute array/{action}: {e!s}") from e - logger.info("Array tool registered successfully") diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index b665e47..0568f64 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -11,7 +11,8 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler +from ..core.utils import safe_get QUERIES: dict[str, str] = { @@ -99,6 +100,10 @@ MUTATIONS: dict[str, str] = { } DESTRUCTIVE_ACTIONS = {"remove"} +_MUTATION_ACTIONS = {"start", "stop", "restart", "pause", "unpause", "remove", "update"} +# NOTE (Code-M-07): "details" and "logs" are listed here because they require a +# container_id parameter, but unlike mutations they use fuzzy name matching (not +# strict). This is intentional: read-only queries are safe with fuzzy matching. _ACTIONS_REQUIRING_CONTAINER_ID = { "start", "stop", @@ -111,6 +116,7 @@ _ACTIONS_REQUIRING_CONTAINER_ID = { "logs", } ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) | {"restart"} +_MAX_TAIL_LINES = 10_000 DOCKER_ACTIONS = Literal[ "list", @@ -130,33 +136,28 @@ DOCKER_ACTIONS = Literal[ "check_updates", ] -# Docker container IDs: 64 hex chars + optional suffix (e.g., ":local") +# Full PrefixedID: 64 hex chars + optional suffix (e.g., ":local") _DOCKER_ID_PATTERN = re.compile(r"^[a-f0-9]{64}(:[a-z0-9]+)?$", re.IGNORECASE) - -def _safe_get(data: dict[str, Any], *keys: str, default: Any = None) -> Any: - """Safely traverse nested dict keys, handling None intermediates.""" - current = data - for key in keys: - if not isinstance(current, dict): - return default - current = current.get(key) - return current if current is not None else default +# Short hex prefix: at least 12 hex chars (standard Docker short ID length) +_DOCKER_SHORT_ID_PATTERN = re.compile(r"^[a-f0-9]{12,63}$", re.IGNORECASE) def find_container_by_identifier( - identifier: str, containers: list[dict[str, Any]] + identifier: str, containers: list[dict[str, Any]], *, strict: bool = False ) -> dict[str, Any] | None: - """Find a container by ID or name with fuzzy matching. + """Find a container by ID or name with optional fuzzy matching. Match priority: 1. Exact ID match 2. Exact name match (case-sensitive) + + When strict=False (default), also tries: 3. Name starts with identifier (case-insensitive) 4. Name contains identifier as substring (case-insensitive) - Note: Short identifiers (e.g. "db") may match unintended containers - via substring. Use more specific names or IDs for precision. + When strict=True, only exact matches (1 & 2) are used. + Use strict=True for mutations to prevent targeting the wrong container. """ if not containers: return None @@ -168,20 +169,24 @@ def find_container_by_identifier( if identifier in c.get("names", []): return c + # Strict mode: no fuzzy matching allowed + if strict: + return None + id_lower = identifier.lower() # Priority 3: prefix match (more precise than substring) for c in containers: for name in c.get("names", []): if name.lower().startswith(id_lower): - logger.info(f"Prefix match: '{identifier}' -> '{name}'") + logger.debug(f"Prefix match: '{identifier}' -> '{name}'") return c # Priority 4: substring match (least precise) for c in containers: for name in c.get("names", []): if id_lower in name.lower(): - logger.info(f"Substring match: '{identifier}' -> '{name}'") + logger.debug(f"Substring match: '{identifier}' -> '{name}'") return c return None @@ -195,27 +200,62 @@ def get_available_container_names(containers: list[dict[str, Any]]) -> list[str] return names -async def _resolve_container_id(container_id: str) -> str: - """Resolve a container name/identifier to its actual PrefixedID.""" +def _looks_like_container_id(identifier: str) -> bool: + """Check if an identifier looks like a container ID (full or short hex prefix).""" + return bool(_DOCKER_ID_PATTERN.match(identifier) or _DOCKER_SHORT_ID_PATTERN.match(identifier)) + + +async def _resolve_container_id(container_id: str, *, strict: bool = False) -> str: + """Resolve a container name/identifier to its actual PrefixedID. + + Optimization: if the identifier is a full 64-char hex ID (with optional + :suffix), skip the container list fetch entirely and use it directly. + If it's a short hex prefix (12-63 chars), fetch the list and match by + ID prefix. Only fetch the container list for name-based lookups. + + Args: + container_id: Container name or ID to resolve + strict: When True, only exact name/ID matches are allowed (no fuzzy). + Use for mutations to prevent targeting the wrong container. + """ + # Full PrefixedID: skip the list fetch entirely if _DOCKER_ID_PATTERN.match(container_id): return container_id - logger.info(f"Resolving container identifier '{container_id}'") + logger.info(f"Resolving container identifier '{container_id}' (strict={strict})") list_query = """ query ResolveContainerID { docker { containers(skipCache: true) { id names } } } """ data = await make_graphql_request(list_query) - containers = _safe_get(data, "docker", "containers", default=[]) - resolved = find_container_by_identifier(container_id, containers) + containers = safe_get(data, "docker", "containers", default=[]) + + # Short hex prefix: match by ID prefix before trying name matching + if _DOCKER_SHORT_ID_PATTERN.match(container_id): + id_lower = container_id.lower() + for c in containers: + cid = (c.get("id") or "").lower() + if cid.startswith(id_lower) or cid.split(":")[0].startswith(id_lower): + actual_id = str(c.get("id", "")) + logger.info(f"Resolved short ID '{container_id}' -> '{actual_id}'") + return actual_id + + resolved = find_container_by_identifier(container_id, containers, strict=strict) if resolved: actual_id = str(resolved.get("id", "")) logger.info(f"Resolved '{container_id}' -> '{actual_id}'") return actual_id available = get_available_container_names(containers) - msg = f"Container '{container_id}' not found." + if strict: + msg = ( + f"Container '{container_id}' not found by exact match. " + f"Mutations require an exact container name or full ID — " + f"fuzzy/substring matching is not allowed for safety." + ) + else: + msg = f"Container '{container_id}' not found." if available: msg += f" Available: {', '.join(available[:10])}" raise ToolError(msg) @@ -264,38 +304,40 @@ def register_docker_tool(mcp: FastMCP) -> None: if action == "network_details" and not network_id: raise ToolError("network_id is required for 'network_details' action") - try: + if tail_lines < 1 or tail_lines > _MAX_TAIL_LINES: + raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}") + + with tool_error_handler("docker", action, logger): logger.info(f"Executing unraid_docker action={action}") # --- Read-only queries --- if action == "list": data = await make_graphql_request(QUERIES["list"]) - containers = _safe_get(data, "docker", "containers", default=[]) - return {"containers": list(containers) if isinstance(containers, list) else []} + containers = safe_get(data, "docker", "containers", default=[]) + return {"containers": containers} if action == "details": + # Resolve name -> ID first (skips list fetch if already an ID) + actual_id = await _resolve_container_id(container_id or "") data = await make_graphql_request(QUERIES["details"]) - containers = _safe_get(data, "docker", "containers", default=[]) - container = find_container_by_identifier(container_id or "", containers) - if container: - return container - available = get_available_container_names(containers) - msg = f"Container '{container_id}' not found." - if available: - msg += f" Available: {', '.join(available[:10])}" - raise ToolError(msg) + containers = safe_get(data, "docker", "containers", default=[]) + # Match by resolved ID (exact match, no second list fetch needed) + for c in containers: + if c.get("id") == actual_id: + return c + raise ToolError(f"Container '{container_id}' not found in details response.") if action == "logs": actual_id = await _resolve_container_id(container_id or "") data = await make_graphql_request( QUERIES["logs"], {"id": actual_id, "tail": tail_lines} ) - return {"logs": _safe_get(data, "docker", "logs")} + return {"logs": safe_get(data, "docker", "logs")} if action == "networks": data = await make_graphql_request(QUERIES["networks"]) networks = data.get("dockerNetworks", []) - return {"networks": list(networks) if isinstance(networks, list) else []} + return {"networks": networks} if action == "network_details": data = await make_graphql_request(QUERIES["network_details"], {"id": network_id}) @@ -303,17 +345,17 @@ def register_docker_tool(mcp: FastMCP) -> None: if action == "port_conflicts": data = await make_graphql_request(QUERIES["port_conflicts"]) - conflicts = _safe_get(data, "docker", "portConflicts", default=[]) - return {"port_conflicts": list(conflicts) if isinstance(conflicts, list) else []} + conflicts = safe_get(data, "docker", "portConflicts", default=[]) + return {"port_conflicts": conflicts} if action == "check_updates": data = await make_graphql_request(QUERIES["check_updates"]) - statuses = _safe_get(data, "docker", "containerUpdateStatuses", default=[]) - return {"update_statuses": list(statuses) if isinstance(statuses, list) else []} + statuses = safe_get(data, "docker", "containerUpdateStatuses", default=[]) + return {"update_statuses": statuses} - # --- Mutations --- + # --- Mutations (strict matching: no fuzzy/substring) --- if action == "restart": - actual_id = await _resolve_container_id(container_id or "") + actual_id = await _resolve_container_id(container_id or "", strict=True) # Stop (idempotent: treat "already stopped" as success) stop_data = await make_graphql_request( MUTATIONS["stop"], @@ -330,7 +372,7 @@ def register_docker_tool(mcp: FastMCP) -> None: if start_data.get("idempotent_success"): result = {} else: - result = _safe_get(start_data, "docker", "start", default={}) + result = safe_get(start_data, "docker", "start", default={}) response: dict[str, Any] = { "success": True, "action": "restart", @@ -342,12 +384,12 @@ def register_docker_tool(mcp: FastMCP) -> None: if action == "update_all": data = await make_graphql_request(MUTATIONS["update_all"]) - results = _safe_get(data, "docker", "updateAllContainers", default=[]) + results = safe_get(data, "docker", "updateAllContainers", default=[]) return {"success": True, "action": "update_all", "containers": results} # Single-container mutations if action in MUTATIONS: - actual_id = await _resolve_container_id(container_id or "") + actual_id = await _resolve_container_id(container_id or "", strict=True) op_context: dict[str, str] | None = ( {"operation": action} if action in ("start", "stop") else None ) @@ -382,10 +424,4 @@ def register_docker_tool(mcp: FastMCP) -> None: raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_docker action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute docker/{action}: {e!s}") from e - logger.info("Docker tool registered successfully") diff --git a/unraid_mcp/tools/health.py b/unraid_mcp/tools/health.py index eae568a..f378e6d 100644 --- a/unraid_mcp/tools/health.py +++ b/unraid_mcp/tools/health.py @@ -7,6 +7,7 @@ connection testing, and subscription diagnostics. import datetime import time from typing import Any, Literal +from urllib.parse import urlparse from fastmcp import FastMCP @@ -19,9 +20,30 @@ from ..config.settings import ( VERSION, ) from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler +def _safe_display_url(url: str | None) -> str | None: + """Return a redacted URL showing only scheme + host + port. + + Strips path, query parameters, credentials, and fragments to avoid + leaking internal network topology or embedded secrets (CWE-200). + """ + if not url: + return None + try: + parsed = urlparse(url) + host = parsed.hostname or "unknown" + if parsed.port: + return f"{parsed.scheme}://{host}:{parsed.port}" + return f"{parsed.scheme}://{host}" + except Exception: + # If parsing fails, show nothing rather than leaking the raw URL + return "" + + +ALL_ACTIONS = {"check", "test_connection", "diagnose"} + HEALTH_ACTIONS = Literal["check", "test_connection", "diagnose"] # Severity ordering: only upgrade, never downgrade @@ -53,12 +75,10 @@ def register_health_tool(mcp: FastMCP) -> None: test_connection - Quick connectivity test (just checks { online }) diagnose - Subscription system diagnostics """ - if action not in ("check", "test_connection", "diagnose"): - raise ToolError( - f"Invalid action '{action}'. Must be one of: check, test_connection, diagnose" - ) + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") - try: + with tool_error_handler("health", action, logger): logger.info(f"Executing unraid_health action={action}") if action == "test_connection": @@ -79,12 +99,6 @@ def register_health_tool(mcp: FastMCP) -> None: raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_health action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute health/{action}: {e!s}") from e - logger.info("Health tool registered successfully") @@ -111,7 +125,7 @@ async def _comprehensive_check() -> dict[str, Any]: overview { unread { alert warning total } } } docker { - containers(skipCache: true) { id state status } + containers { id state status } } } """ @@ -135,7 +149,7 @@ async def _comprehensive_check() -> dict[str, Any]: if info: health_info["unraid_system"] = { "status": "connected", - "url": UNRAID_API_URL, + "url": _safe_display_url(UNRAID_API_URL), "machine_id": info.get("machineId"), "version": info.get("versions", {}).get("unraid"), "uptime": info.get("os", {}).get("uptime"), @@ -215,6 +229,42 @@ async def _comprehensive_check() -> dict[str, Any]: } +def _analyze_subscription_status( + status: dict[str, Any], +) -> tuple[int, list[dict[str, Any]]]: + """Analyze subscription status dict, returning error count and connection issues. + + This is the canonical implementation of subscription status analysis. + TODO: subscriptions/diagnostics.py (lines 168-182) duplicates this logic. + That module should be refactored to call this helper once file ownership + allows cross-agent edits. See Code-H05. + + Args: + status: Dict of subscription name -> status info from get_subscription_status(). + + Returns: + Tuple of (error_count, connection_issues_list). + """ + error_count = 0 + connection_issues: list[dict[str, Any]] = [] + + for sub_name, sub_status in status.items(): + runtime = sub_status.get("runtime", {}) + conn_state = runtime.get("connection_state", "unknown") + if conn_state in ("error", "auth_failed", "timeout", "max_retries_exceeded"): + error_count += 1 + if runtime.get("last_error"): + connection_issues.append( + { + "subscription": sub_name, + "state": conn_state, + "error": runtime["last_error"], + } + ) + + return error_count, connection_issues + + async def _diagnose_subscriptions() -> dict[str, Any]: """Import and run subscription diagnostics.""" try: @@ -223,13 +273,10 @@ async def _diagnose_subscriptions() -> dict[str, Any]: await ensure_subscriptions_started() - status = subscription_manager.get_subscription_status() - # This list is intentionally placed into the summary dict below and then - # appended to in the loop — the mutable alias ensures both references - # reflect the same data without a second pass. - connection_issues: list[dict[str, Any]] = [] + status = await subscription_manager.get_subscription_status() + error_count, connection_issues = _analyze_subscription_status(status) - diagnostic_info: dict[str, Any] = { + return { "timestamp": datetime.datetime.now(datetime.UTC).isoformat(), "environment": { "auto_start_enabled": subscription_manager.auto_start_enabled, @@ -241,27 +288,11 @@ async def _diagnose_subscriptions() -> dict[str, Any]: "total_configured": len(subscription_manager.subscription_configs), "active_count": len(subscription_manager.active_subscriptions), "with_data": len(subscription_manager.resource_data), - "in_error_state": 0, + "in_error_state": error_count, "connection_issues": connection_issues, }, } - for sub_name, sub_status in status.items(): - runtime = sub_status.get("runtime", {}) - conn_state = runtime.get("connection_state", "unknown") - if conn_state in ("error", "auth_failed", "timeout", "max_retries_exceeded"): - diagnostic_info["summary"]["in_error_state"] += 1 - if runtime.get("last_error"): - connection_issues.append( - { - "subscription": sub_name, - "state": conn_state, - "error": runtime["last_error"], - } - ) - - return diagnostic_info - except ImportError: return { "error": "Subscription modules not available", diff --git a/unraid_mcp/tools/info.py b/unraid_mcp/tools/info.py index cdefcb3..b1287bb 100644 --- a/unraid_mcp/tools/info.py +++ b/unraid_mcp/tools/info.py @@ -10,7 +10,8 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler +from ..core.utils import format_kb # Pre-built queries keyed by action name @@ -19,7 +20,7 @@ QUERIES: dict[str, str] = { query GetSystemInfo { info { os { platform distro release codename kernel arch hostname codepage logofile serial build uptime } - cpu { manufacturer brand vendor family model stepping revision voltage speed speedmin speedmax threads cores processors socket cache flags } + cpu { manufacturer brand vendor family model stepping revision voltage speed speedmin speedmax threads cores processors socket cache } memory { layout { bank type clockSpeed formFactor manufacturer partNum serialNum } } @@ -81,7 +82,6 @@ QUERIES: dict[str, str] = { shareAvahiEnabled safeMode startMode configValid configError joinStatus deviceCount flashGuid flashProduct flashVendor mdState mdVersion shareCount shareSmbCount shareNfsCount shareAfpCount shareMoverActive - csrfToken } } """, @@ -156,6 +156,8 @@ QUERIES: dict[str, str] = { """, } +ALL_ACTIONS = set(QUERIES) + INFO_ACTIONS = Literal[ "overview", "array", @@ -178,9 +180,13 @@ INFO_ACTIONS = Literal[ "ups_config", ] -assert set(QUERIES.keys()) == set(INFO_ACTIONS.__args__), ( - "QUERIES keys and INFO_ACTIONS are out of sync" -) +if set(INFO_ACTIONS.__args__) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(INFO_ACTIONS.__args__) + _extra = set(INFO_ACTIONS.__args__) - ALL_ACTIONS + raise RuntimeError( + f"QUERIES keys and INFO_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]: @@ -189,17 +195,17 @@ def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]: if raw_info.get("os"): os_info = raw_info["os"] summary["os"] = ( - f"{os_info.get('distro', '')} {os_info.get('release', '')} " - f"({os_info.get('platform', '')}, {os_info.get('arch', '')})" + f"{os_info.get('distro') or 'unknown'} {os_info.get('release') or 'unknown'} " + f"({os_info.get('platform') or 'unknown'}, {os_info.get('arch') or 'unknown'})" ) - summary["hostname"] = os_info.get("hostname") + summary["hostname"] = os_info.get("hostname") or "unknown" summary["uptime"] = os_info.get("uptime") if raw_info.get("cpu"): cpu = raw_info["cpu"] summary["cpu"] = ( - f"{cpu.get('manufacturer', '')} {cpu.get('brand', '')} " - f"({cpu.get('cores', '?')} cores, {cpu.get('threads', '?')} threads)" + f"{cpu.get('manufacturer') or 'unknown'} {cpu.get('brand') or 'unknown'} " + f"({cpu.get('cores') or '?'} cores, {cpu.get('threads') or '?'} threads)" ) if raw_info.get("memory") and raw_info["memory"].get("layout"): @@ -207,10 +213,10 @@ def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]: summary["memory_layout_details"] = [] for stick in mem_layout: summary["memory_layout_details"].append( - f"Bank {stick.get('bank', '?')}: Type {stick.get('type', '?')}, " - f"Speed {stick.get('clockSpeed', '?')}MHz, " - f"Manufacturer: {stick.get('manufacturer', '?')}, " - f"Part: {stick.get('partNum', '?')}" + f"Bank {stick.get('bank') or '?'}: Type {stick.get('type') or '?'}, " + f"Speed {stick.get('clockSpeed') or '?'}MHz, " + f"Manufacturer: {stick.get('manufacturer') or '?'}, " + f"Part: {stick.get('partNum') or '?'}" ) summary["memory_summary"] = ( "Stick layout details retrieved. Overall total/used/free memory stats " @@ -255,31 +261,14 @@ def _analyze_disk_health(disks: list[dict[str, Any]]) -> dict[str, int]: return counts -def _format_kb(k: Any) -> str: - """Format kilobyte values into human-readable sizes.""" - if k is None: - return "N/A" - try: - k = int(k) - except (ValueError, TypeError): - return "N/A" - if k >= 1024 * 1024 * 1024: - return f"{k / (1024 * 1024 * 1024):.2f} TB" - if k >= 1024 * 1024: - return f"{k / (1024 * 1024):.2f} GB" - if k >= 1024: - return f"{k / 1024:.2f} MB" - return f"{k} KB" - - def _process_array_status(raw: dict[str, Any]) -> dict[str, Any]: """Process raw array data into summary + details.""" summary: dict[str, Any] = {"state": raw.get("state")} if raw.get("capacity") and raw["capacity"].get("kilobytes"): kb = raw["capacity"]["kilobytes"] - summary["capacity_total"] = _format_kb(kb.get("total")) - summary["capacity_used"] = _format_kb(kb.get("used")) - summary["capacity_free"] = _format_kb(kb.get("free")) + summary["capacity_total"] = format_kb(kb.get("total")) + summary["capacity_used"] = format_kb(kb.get("used")) + summary["capacity_free"] = format_kb(kb.get("free")) summary["num_parity_disks"] = len(raw.get("parities", [])) summary["num_data_disks"] = len(raw.get("disks", [])) @@ -345,8 +334,8 @@ def register_info_tool(mcp: FastMCP) -> None: ups_device - Single UPS device (requires device_id) ups_config - UPS configuration """ - if action not in QUERIES: - raise ToolError(f"Invalid action '{action}'. Must be one of: {list(QUERIES.keys())}") + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") if action == "ups_device" and not device_id: raise ToolError("device_id is required for ups_device action") @@ -377,7 +366,7 @@ def register_info_tool(mcp: FastMCP) -> None: "ups_devices": ("upsDevices", "ups_devices"), } - try: + with tool_error_handler("info", action, logger): logger.info(f"Executing unraid_info action={action}") data = await make_graphql_request(query, variables) @@ -426,14 +415,8 @@ def register_info_tool(mcp: FastMCP) -> None: if action in list_actions: response_key, output_key = list_actions[action] items = data.get(response_key) or [] - return {output_key: list(items) if isinstance(items, list) else []} + return {output_key: items} raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_info action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute info/{action}: {e!s}") from e - logger.info("Info tool registered successfully") diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index f556a85..be9c539 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -10,7 +10,7 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -45,6 +45,7 @@ MUTATIONS: dict[str, str] = { } DESTRUCTIVE_ACTIONS = {"delete"} +ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) KEY_ACTIONS = Literal[ "list", @@ -76,14 +77,13 @@ def register_keys_tool(mcp: FastMCP) -> None: update - Update an API key (requires key_id; optional name, roles) delete - Delete API keys (requires key_id, confirm=True) """ - all_actions = set(QUERIES) | set(MUTATIONS) - if action not in all_actions: - raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(all_actions)}") + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") if action in DESTRUCTIVE_ACTIONS and not confirm: raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") - try: + with tool_error_handler("keys", action, logger): logger.info(f"Executing unraid_keys action={action}") if action == "list": @@ -141,10 +141,4 @@ def register_keys_tool(mcp: FastMCP) -> None: raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_keys action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute keys/{action}: {e!s}") from e - logger.info("Keys tool registered successfully") diff --git a/unraid_mcp/tools/notifications.py b/unraid_mcp/tools/notifications.py index 635d01a..0df7e2a 100644 --- a/unraid_mcp/tools/notifications.py +++ b/unraid_mcp/tools/notifications.py @@ -10,7 +10,7 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -76,6 +76,8 @@ MUTATIONS: dict[str, str] = { } DESTRUCTIVE_ACTIONS = {"delete", "delete_archived"} +ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) +_VALID_IMPORTANCE = {"ALERT", "WARNING", "NORMAL"} NOTIFICATION_ACTIONS = Literal[ "overview", @@ -120,16 +122,13 @@ def register_notifications_tool(mcp: FastMCP) -> None: delete_archived - Delete all archived notifications (requires confirm=True) archive_all - Archive all notifications (optional importance filter) """ - all_actions = {**QUERIES, **MUTATIONS} - if action not in all_actions: - raise ToolError( - f"Invalid action '{action}'. Must be one of: {list(all_actions.keys())}" - ) + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") if action in DESTRUCTIVE_ACTIONS and not confirm: raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") - try: + with tool_error_handler("notifications", action, logger): logger.info(f"Executing unraid_notifications action={action}") if action == "overview": @@ -147,18 +146,29 @@ def register_notifications_tool(mcp: FastMCP) -> None: filter_vars["importance"] = importance.upper() data = await make_graphql_request(QUERIES["list"], {"filter": filter_vars}) notifications = data.get("notifications", {}) - result = notifications.get("list", []) - return {"notifications": list(result) if isinstance(result, list) else []} + return {"notifications": notifications.get("list", [])} if action == "warnings": data = await make_graphql_request(QUERIES["warnings"]) notifications = data.get("notifications", {}) - result = notifications.get("warningsAndAlerts", []) - return {"warnings": list(result) if isinstance(result, list) else []} + return {"warnings": notifications.get("warningsAndAlerts", [])} if action == "create": if title is None or subject is None or description is None or importance is None: raise ToolError("create requires title, subject, description, and importance") + if importance.upper() not in _VALID_IMPORTANCE: + raise ToolError( + f"importance must be one of: {', '.join(sorted(_VALID_IMPORTANCE))}. " + f"Got: '{importance}'" + ) + if len(title) > 200: + raise ToolError(f"title must be at most 200 characters (got {len(title)})") + if len(subject) > 500: + raise ToolError(f"subject must be at most 500 characters (got {len(subject)})") + if len(description) > 2000: + raise ToolError( + f"description must be at most 2000 characters (got {len(description)})" + ) input_data = { "title": title, "subject": subject, @@ -196,10 +206,4 @@ def register_notifications_tool(mcp: FastMCP) -> None: raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_notifications action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute notifications/{action}: {e!s}") from e - logger.info("Notifications tool registered successfully") diff --git a/unraid_mcp/tools/rclone.py b/unraid_mcp/tools/rclone.py index 1a496aa..7c091cd 100644 --- a/unraid_mcp/tools/rclone.py +++ b/unraid_mcp/tools/rclone.py @@ -4,13 +4,14 @@ Provides the `unraid_rclone` tool with 4 actions for managing cloud storage remotes (S3, Google Drive, Dropbox, FTP, etc.). """ +import re from typing import Any, Literal from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -49,6 +50,51 @@ RCLONE_ACTIONS = Literal[ "delete_remote", ] +# Max config entries to prevent abuse +_MAX_CONFIG_KEYS = 50 +# Pattern for suspicious key names (path traversal, shell metacharacters) +_DANGEROUS_KEY_PATTERN = re.compile(r"[.]{2}|[/\\;|`$(){}]") +# Max length for individual config values +_MAX_VALUE_LENGTH = 4096 + + +def _validate_config_data(config_data: dict[str, Any]) -> dict[str, str]: + """Validate and sanitize rclone config_data before passing to GraphQL. + + Ensures all keys and values are safe strings with no injection vectors. + + Raises: + ToolError: If config_data contains invalid keys or values + """ + if len(config_data) > _MAX_CONFIG_KEYS: + raise ToolError(f"config_data has {len(config_data)} keys (max {_MAX_CONFIG_KEYS})") + + validated: dict[str, str] = {} + for key, value in config_data.items(): + if not isinstance(key, str) or not key.strip(): + raise ToolError( + f"config_data keys must be non-empty strings, got: {type(key).__name__}" + ) + if _DANGEROUS_KEY_PATTERN.search(key): + raise ToolError( + f"config_data key '{key}' contains disallowed characters " + f"(path traversal or shell metacharacters)" + ) + if not isinstance(value, (str, int, float, bool)): + raise ToolError( + f"config_data['{key}'] must be a string, number, or boolean, " + f"got: {type(value).__name__}" + ) + str_value = str(value) + if len(str_value) > _MAX_VALUE_LENGTH: + raise ToolError( + f"config_data['{key}'] value exceeds max length " + f"({len(str_value)} > {_MAX_VALUE_LENGTH})" + ) + validated[key] = str_value + + return validated + def register_rclone_tool(mcp: FastMCP) -> None: """Register the unraid_rclone tool with the FastMCP instance.""" @@ -75,7 +121,7 @@ def register_rclone_tool(mcp: FastMCP) -> None: if action in DESTRUCTIVE_ACTIONS and not confirm: raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") - try: + with tool_error_handler("rclone", action, logger): logger.info(f"Executing unraid_rclone action={action}") if action == "list_remotes": @@ -96,9 +142,10 @@ def register_rclone_tool(mcp: FastMCP) -> None: if action == "create_remote": if name is None or provider_type is None or config_data is None: raise ToolError("create_remote requires name, provider_type, and config_data") + validated_config = _validate_config_data(config_data) data = await make_graphql_request( MUTATIONS["create_remote"], - {"input": {"name": name, "type": provider_type, "config": config_data}}, + {"input": {"name": name, "type": provider_type, "config": validated_config}}, ) remote = data.get("rclone", {}).get("createRCloneRemote") if not remote: @@ -127,10 +174,4 @@ def register_rclone_tool(mcp: FastMCP) -> None: raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_rclone action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute rclone/{action}: {e!s}") from e - logger.info("RClone tool registered successfully") diff --git a/unraid_mcp/tools/storage.py b/unraid_mcp/tools/storage.py index 60629ae..125595c 100644 --- a/unraid_mcp/tools/storage.py +++ b/unraid_mcp/tools/storage.py @@ -4,17 +4,19 @@ Provides the `unraid_storage` tool with 6 actions for shares, physical disks, unassigned devices, log files, and log content retrieval. """ +import os from typing import Any, Literal -import anyio from fastmcp import FastMCP from ..config.logging import logger from ..core.client import DISK_TIMEOUT, make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler +from ..core.utils import format_bytes _ALLOWED_LOG_PREFIXES = ("/var/log/", "/boot/logs/", "/mnt/") +_MAX_TAIL_LINES = 10_000 QUERIES: dict[str, str] = { "shares": """ @@ -56,6 +58,8 @@ QUERIES: dict[str, str] = { """, } +ALL_ACTIONS = set(QUERIES) + STORAGE_ACTIONS = Literal[ "shares", "disks", @@ -66,21 +70,6 @@ STORAGE_ACTIONS = Literal[ ] -def format_bytes(bytes_value: int | None) -> str: - """Format byte values into human-readable sizes.""" - if bytes_value is None: - return "N/A" - try: - value = float(int(bytes_value)) - except (ValueError, TypeError): - return "N/A" - for unit in ["B", "KB", "MB", "GB", "TB", "PB"]: - if value < 1024.0: - return f"{value:.2f} {unit}" - value /= 1024.0 - return f"{value:.2f} EB" - - def register_storage_tool(mcp: FastMCP) -> None: """Register the unraid_storage tool with the FastMCP instance.""" @@ -101,17 +90,22 @@ def register_storage_tool(mcp: FastMCP) -> None: log_files - List available log files logs - Retrieve log content (requires log_path, optional tail_lines) """ - if action not in QUERIES: - raise ToolError(f"Invalid action '{action}'. Must be one of: {list(QUERIES.keys())}") + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") if action == "disk_details" and not disk_id: raise ToolError("disk_id is required for 'disk_details' action") + if tail_lines < 1 or tail_lines > _MAX_TAIL_LINES: + raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}") + if action == "logs": if not log_path: raise ToolError("log_path is required for 'logs' action") - # Resolve path to prevent traversal attacks (e.g. /var/log/../../etc/shadow) - normalized = str(await anyio.Path(log_path).resolve()) + # Resolve path synchronously to prevent traversal attacks. + # Using os.path.realpath instead of anyio.Path.resolve() because the + # async variant blocks on NFS-mounted paths under /mnt/ (Perf-AI-1). + normalized = os.path.realpath(log_path) # noqa: ASYNC240 if not any(normalized.startswith(p) for p in _ALLOWED_LOG_PREFIXES): raise ToolError( f"log_path must start with one of: {', '.join(_ALLOWED_LOG_PREFIXES)}. " @@ -128,17 +122,15 @@ def register_storage_tool(mcp: FastMCP) -> None: elif action == "logs": variables = {"path": log_path, "lines": tail_lines} - try: + with tool_error_handler("storage", action, logger): logger.info(f"Executing unraid_storage action={action}") data = await make_graphql_request(query, variables, custom_timeout=custom_timeout) if action == "shares": - shares = data.get("shares", []) - return {"shares": list(shares) if isinstance(shares, list) else []} + return {"shares": data.get("shares", [])} if action == "disks": - disks = data.get("disks", []) - return {"disks": list(disks) if isinstance(disks, list) else []} + return {"disks": data.get("disks", [])} if action == "disk_details": raw = data.get("disk", {}) @@ -159,22 +151,14 @@ def register_storage_tool(mcp: FastMCP) -> None: return {"summary": summary, "details": raw} if action == "unassigned": - devices = data.get("unassignedDevices", []) - return {"devices": list(devices) if isinstance(devices, list) else []} + return {"devices": data.get("unassignedDevices", [])} if action == "log_files": - files = data.get("logFiles", []) - return {"log_files": list(files) if isinstance(files, list) else []} + return {"log_files": data.get("logFiles", [])} if action == "logs": return dict(data.get("logFile") or {}) raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_storage action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute storage/{action}: {e!s}") from e - logger.info("Storage tool registered successfully") diff --git a/unraid_mcp/tools/users.py b/unraid_mcp/tools/users.py index 2d9edab..cea4cc4 100644 --- a/unraid_mcp/tools/users.py +++ b/unraid_mcp/tools/users.py @@ -10,7 +10,7 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -39,17 +39,11 @@ def register_users_tool(mcp: FastMCP) -> None: Note: Unraid API does not support user management operations (list, add, delete). """ if action not in ALL_ACTIONS: - raise ToolError(f"Invalid action '{action}'. Must be: me") + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") - try: + with tool_error_handler("users", action, logger): logger.info("Executing unraid_users action=me") data = await make_graphql_request(QUERIES["me"]) return data.get("me") or {} - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_users action=me: {e}", exc_info=True) - raise ToolError(f"Failed to execute users/me: {e!s}") from e - logger.info("Users tool registered successfully") diff --git a/unraid_mcp/tools/virtualization.py b/unraid_mcp/tools/virtualization.py index 562c550..baa421a 100644 --- a/unraid_mcp/tools/virtualization.py +++ b/unraid_mcp/tools/virtualization.py @@ -10,7 +10,7 @@ from fastmcp import FastMCP from ..config.logging import logger from ..core.client import make_graphql_request -from ..core.exceptions import ToolError +from ..core.exceptions import ToolError, tool_error_handler QUERIES: dict[str, str] = { @@ -19,6 +19,13 @@ QUERIES: dict[str, str] = { vms { id domains { id name state uuid } } } """, + # NOTE: The Unraid GraphQL API does not expose a single-VM query. + # The details query is identical to list; client-side filtering is required. + "details": """ + query ListVMs { + vms { id domains { id name state uuid } } + } + """, } MUTATIONS: dict[str, str] = { @@ -64,7 +71,7 @@ VM_ACTIONS = Literal[ "reset", ] -ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) | {"details"} +ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) def register_vm_tool(mcp: FastMCP) -> None: @@ -98,20 +105,26 @@ def register_vm_tool(mcp: FastMCP) -> None: if action in DESTRUCTIVE_ACTIONS and not confirm: raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") - try: - logger.info(f"Executing unraid_vm action={action}") + with tool_error_handler("vm", action, logger): + try: + logger.info(f"Executing unraid_vm action={action}") - if action in ("list", "details"): - data = await make_graphql_request(QUERIES["list"]) - if data.get("vms"): + if action == "list": + data = await make_graphql_request(QUERIES["list"]) + if data.get("vms"): + vms = data["vms"].get("domains") or data["vms"].get("domain") or [] + if isinstance(vms, dict): + vms = [vms] + return {"vms": vms} + return {"vms": []} + + if action == "details": + data = await make_graphql_request(QUERIES["details"]) + if not data.get("vms"): + raise ToolError("No VM data returned from server") vms = data["vms"].get("domains") or data["vms"].get("domain") or [] if isinstance(vms, dict): vms = [vms] - - if action == "list": - return {"vms": vms} - - # details: find specific VM for vm in vms: if ( vm.get("uuid") == vm_id @@ -121,33 +134,28 @@ def register_vm_tool(mcp: FastMCP) -> None: return dict(vm) available = [f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms] raise ToolError(f"VM '{vm_id}' not found. Available: {', '.join(available)}") - if action == "details": - raise ToolError("No VM data returned from server") - return {"vms": []} - # Mutations - if action in MUTATIONS: - data = await make_graphql_request(MUTATIONS[action], {"id": vm_id}) - field = _MUTATION_FIELDS.get(action, action) - if data.get("vm") and field in data["vm"]: - return { - "success": data["vm"][field], - "action": action, - "vm_id": vm_id, - } - raise ToolError(f"Failed to {action} VM or unexpected response") + # Mutations + if action in MUTATIONS: + data = await make_graphql_request(MUTATIONS[action], {"id": vm_id}) + field = _MUTATION_FIELDS.get(action, action) + if data.get("vm") and field in data["vm"]: + return { + "success": data["vm"][field], + "action": action, + "vm_id": vm_id, + } + raise ToolError(f"Failed to {action} VM or unexpected response") - raise ToolError(f"Unhandled action '{action}' — this is a bug") + raise ToolError(f"Unhandled action '{action}' — this is a bug") - except ToolError: - raise - except Exception as e: - logger.error(f"Error in unraid_vm action={action}: {e}", exc_info=True) - msg = str(e) - if "VMs are not available" in msg: - raise ToolError( - "VMs not available on this server. Check VM support is enabled." - ) from e - raise ToolError(f"Failed to execute vm/{action}: {msg}") from e + except ToolError: + raise + except Exception as e: + if "VMs are not available" in str(e): + raise ToolError( + "VMs not available on this server. Check VM support is enabled." + ) from e + raise logger.info("VM tool registered successfully") diff --git a/uv.lock b/uv.lock index 9c09d74..313cdc1 100644 --- a/uv.lock +++ b/uv.lock @@ -1706,15 +1706,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/14/2c/dee705c427875402200fe779eb8a3c00ccb349471172c41178336e9599cc/typer-0.23.2-py3-none-any.whl", hash = "sha256:e9c8dc380f82450b3c851a9b9d5a0edf95d1d6456ae70c517d8b06a50c7a9978", size = 56834, upload-time = "2026-02-16T18:52:39.308Z" }, ] -[[package]] -name = "types-pytz" -version = "2025.2.0.20251108" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/40/ff/c047ddc68c803b46470a357454ef76f4acd8c1088f5cc4891cdd909bfcf6/types_pytz-2025.2.0.20251108.tar.gz", hash = "sha256:fca87917836ae843f07129567b74c1929f1870610681b4c92cb86a3df5817bdb", size = 10961, upload-time = "2025-11-08T02:55:57.001Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl", hash = "sha256:0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c", size = 10116, upload-time = "2025-11-08T02:55:56.194Z" }, -] - [[package]] name = "typing-extensions" version = "4.15.0" @@ -1745,7 +1736,6 @@ dependencies = [ { name = "fastmcp" }, { name = "httpx" }, { name = "python-dotenv" }, - { name = "pytz" }, { name = "rich" }, { name = "uvicorn", extra = ["standard"] }, { name = "websockets" }, @@ -1762,7 +1752,6 @@ dev = [ { name = "ruff" }, { name = "twine" }, { name = "ty" }, - { name = "types-pytz" }, ] [package.metadata] @@ -1771,7 +1760,6 @@ requires-dist = [ { name = "fastmcp", specifier = ">=2.14.5" }, { name = "httpx", specifier = ">=0.28.1" }, { name = "python-dotenv", specifier = ">=1.1.1" }, - { name = "pytz", specifier = ">=2025.2" }, { name = "rich", specifier = ">=14.1.0" }, { name = "uvicorn", extras = ["standard"], specifier = ">=0.35.0" }, { name = "websockets", specifier = ">=15.0.1" }, @@ -1788,7 +1776,6 @@ dev = [ { name = "ruff", specifier = ">=0.12.8" }, { name = "twine", specifier = ">=6.0.1" }, { name = "ty", specifier = ">=0.0.15" }, - { name = "types-pytz", specifier = ">=2025.2.0.20250809" }, ] [[package]] From f76e676fd41159942b42ea6367277d28bfeb6d48 Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Wed, 18 Feb 2026 01:28:40 -0500 Subject: [PATCH 02/34] test: close critical coverage gaps and harden PR review fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Critical bug fixes from PR review agents: - client.py: eager asyncio.Lock init, Final[frozenset] for _SENSITIVE_KEYS, explicit 429 ToolError after retries exhausted, removed lazy _get_client_lock() and _RateLimiter._get_lock() patterns - exceptions.py: use builtin TimeoutError (UP041), explicit handler before broad except so asyncio timeouts get descriptive messages - docker.py: add update_all to DESTRUCTIVE_ACTIONS (was missing), remove dead _MUTATION_ACTIONS constant - manager.py: _cap_log_content returns new dict (immutable), lock write to resource_data, clean dead task from active_subscriptions after loop exits - diagnostics.py: fix inaccurate comment about semicolon injection guard - health.py: narrow except ValueError in _safe_display_url, fix TODO comment New test coverage (98 tests added, 529 → 598 passing): - test_subscription_validation.py: 27 tests for _validate_subscription_query (security-critical allow-list, forbidden keyword guards, word-boundary test) - test_subscription_manager.py: 12 tests for _cap_log_content (immutability, truncation, nesting, passthrough) - test_client.py: +57 tests — _RateLimiter (token math, refill, sleep-on-empty), _QueryCache (TTL, invalidation, is_cacheable), 429 retry loop (1/2/3 failures) - test_health.py: +10 tests for _safe_display_url (credential strip, port, path/query removal, malformed IPv6 → ) - test_notifications.py: +7 importance enum and field length validation tests - test_rclone.py: +7 _validate_config_data security guard tests - test_storage.py: +15 (tail_lines bounds, format_kb, safe_get) - test_docker.py: update_all now requires confirm=True + new guard test - test_destructive_guards.py: update audit to include update_all Co-authored-by: Claude --- tests/safety/test_destructive_guards.py | 3 +- tests/test_client.py | 231 ++++++++++++++++++++++++ tests/test_docker.py | 10 +- tests/test_health.py | 53 ++++++ tests/test_notifications.py | 84 +++++++++ tests/test_rclone.py | 80 ++++++++ tests/test_storage.py | 66 ++++++- tests/test_subscription_manager.py | 131 ++++++++++++++ tests/test_subscription_validation.py | 131 ++++++++++++++ unraid_mcp/core/client.py | 65 ++++--- unraid_mcp/core/exceptions.py | 17 +- unraid_mcp/subscriptions/diagnostics.py | 6 +- unraid_mcp/subscriptions/manager.py | 33 +++- unraid_mcp/tools/docker.py | 3 +- unraid_mcp/tools/health.py | 10 +- 15 files changed, 867 insertions(+), 56 deletions(-) create mode 100644 tests/test_subscription_manager.py create mode 100644 tests/test_subscription_validation.py diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py index 14d66f3..43bf230 100644 --- a/tests/safety/test_destructive_guards.py +++ b/tests/safety/test_destructive_guards.py @@ -39,7 +39,7 @@ KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str]]] = { "module": "unraid_mcp.tools.docker", "register_fn": "register_docker_tool", "tool_name": "unraid_docker", - "actions": {"remove"}, + "actions": {"remove", "update_all"}, "runtime_set": DOCKER_DESTRUCTIVE, }, "vm": { @@ -143,6 +143,7 @@ class TestDestructiveActionRegistries: _DESTRUCTIVE_TEST_CASES: list[tuple[str, str, dict]] = [ # Docker ("docker", "remove", {"container_id": "abc123"}), + ("docker", "update_all", {}), # VM ("vm", "force_stop", {"vm_id": "test-vm-uuid"}), ("vm", "reset", {"vm_id": "test-vm-uuid"}), diff --git a/tests/test_client.py b/tests/test_client.py index 9208d76..c90f797 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,6 +1,7 @@ """Tests for unraid_mcp.core.client — GraphQL client infrastructure.""" import json +import time from unittest.mock import AsyncMock, MagicMock, patch import httpx @@ -9,6 +10,8 @@ import pytest from unraid_mcp.core.client import ( DEFAULT_TIMEOUT, DISK_TIMEOUT, + _QueryCache, + _RateLimiter, _redact_sensitive, is_idempotent_error, make_graphql_request, @@ -464,3 +467,231 @@ class TestGraphQLErrorHandling: pytest.raises(ToolError, match="GraphQL API error"), ): await make_graphql_request("{ info }") + + +# --------------------------------------------------------------------------- +# _RateLimiter +# --------------------------------------------------------------------------- + + +class TestRateLimiter: + """Unit tests for the token bucket rate limiter.""" + + async def test_acquire_consumes_one_token(self) -> None: + limiter = _RateLimiter(max_tokens=10, refill_rate=1.0) + initial = limiter.tokens + await limiter.acquire() + assert limiter.tokens == initial - 1 + + async def test_acquire_succeeds_when_tokens_available(self) -> None: + limiter = _RateLimiter(max_tokens=5, refill_rate=1.0) + # Should complete without sleeping + for _ in range(5): + await limiter.acquire() + # _refill() runs during each acquire() call and adds a tiny time-based + # amount; check < 1.0 (not enough for another immediate request) rather + # than == 0.0 to avoid flakiness from timing. + assert limiter.tokens < 1.0 + + async def test_tokens_do_not_exceed_max(self) -> None: + limiter = _RateLimiter(max_tokens=10, refill_rate=1.0) + # Force refill with large elapsed time + limiter.last_refill = time.monotonic() - 100.0 # 100 seconds ago + limiter._refill() + assert limiter.tokens == 10.0 # Capped at max_tokens + + async def test_refill_adds_tokens_based_on_elapsed(self) -> None: + limiter = _RateLimiter(max_tokens=100, refill_rate=10.0) + limiter.tokens = 0.0 + limiter.last_refill = time.monotonic() - 1.0 # 1 second ago + limiter._refill() + # Should have refilled ~10 tokens (10.0 rate * 1.0 sec) + assert 9.5 < limiter.tokens < 10.5 + + async def test_acquire_sleeps_when_no_tokens(self) -> None: + """When tokens are exhausted, acquire should sleep before consuming.""" + limiter = _RateLimiter(max_tokens=1, refill_rate=1.0) + limiter.tokens = 0.0 + + sleep_calls = [] + + async def fake_sleep(duration: float) -> None: + sleep_calls.append(duration) + # Simulate refill by advancing last_refill so tokens replenish + limiter.tokens = 1.0 + limiter.last_refill = time.monotonic() + + with patch("unraid_mcp.core.client.asyncio.sleep", side_effect=fake_sleep): + await limiter.acquire() + + assert len(sleep_calls) == 1 + assert sleep_calls[0] > 0 + + async def test_default_params_match_api_limits(self) -> None: + """Default rate limiter must use 90 tokens at 9.0/sec (10% headroom from 100/10s).""" + limiter = _RateLimiter() + assert limiter.max_tokens == 90 + assert limiter.refill_rate == 9.0 + + +# --------------------------------------------------------------------------- +# _QueryCache +# --------------------------------------------------------------------------- + + +class TestQueryCache: + """Unit tests for the TTL query cache.""" + + def test_miss_on_empty_cache(self) -> None: + cache = _QueryCache() + assert cache.get("{ info }", None) is None + + def test_put_and_get_hit(self) -> None: + cache = _QueryCache() + data = {"result": "ok"} + cache.put("GetNetworkConfig { }", None, data) + result = cache.get("GetNetworkConfig { }", None) + assert result == data + + def test_expired_entry_returns_none(self) -> None: + cache = _QueryCache() + data = {"result": "ok"} + cache.put("GetNetworkConfig { }", None, data) + # Manually expire the entry + key = cache._cache_key("GetNetworkConfig { }", None) + cache._store[key] = (time.monotonic() - 1.0, data) # expired 1 sec ago + assert cache.get("GetNetworkConfig { }", None) is None + + def test_invalidate_all_clears_store(self) -> None: + cache = _QueryCache() + cache.put("GetNetworkConfig { }", None, {"x": 1}) + cache.put("GetOwner { }", None, {"y": 2}) + assert len(cache._store) == 2 + cache.invalidate_all() + assert len(cache._store) == 0 + + def test_variables_affect_cache_key(self) -> None: + """Different variables produce different cache keys.""" + cache = _QueryCache() + q = "GetNetworkConfig($id: ID!) { network(id: $id) { name } }" + cache.put(q, {"id": "1"}, {"name": "eth0"}) + cache.put(q, {"id": "2"}, {"name": "eth1"}) + assert cache.get(q, {"id": "1"}) == {"name": "eth0"} + assert cache.get(q, {"id": "2"}) == {"name": "eth1"} + + def test_is_cacheable_returns_true_for_known_prefixes(self) -> None: + assert _QueryCache.is_cacheable("GetNetworkConfig { ... }") is True + assert _QueryCache.is_cacheable("GetRegistrationInfo { ... }") is True + assert _QueryCache.is_cacheable("GetOwner { ... }") is True + assert _QueryCache.is_cacheable("GetFlash { ... }") is True + + def test_is_cacheable_returns_false_for_mutations(self) -> None: + assert _QueryCache.is_cacheable('mutation { docker { start(id: "x") } }') is False + + def test_is_cacheable_returns_false_for_unlisted_queries(self) -> None: + assert _QueryCache.is_cacheable("{ docker { containers { id } } }") is False + assert _QueryCache.is_cacheable("{ info { os } }") is False + + def test_is_cacheable_mutation_check_is_prefix(self) -> None: + """Queries that start with 'mutation' after whitespace are not cacheable.""" + assert _QueryCache.is_cacheable(" mutation { ... }") is False + + def test_expired_entry_removed_from_store(self) -> None: + """Accessing an expired entry should remove it from the internal store.""" + cache = _QueryCache() + cache.put("GetOwner { }", None, {"owner": "root"}) + key = cache._cache_key("GetOwner { }", None) + cache._store[key] = (time.monotonic() - 1.0, {"owner": "root"}) + assert key in cache._store + cache.get("GetOwner { }", None) # triggers deletion + assert key not in cache._store + + +# --------------------------------------------------------------------------- +# make_graphql_request — 429 retry behavior +# --------------------------------------------------------------------------- + + +class TestRateLimitRetry: + """Tests for the 429 retry loop in make_graphql_request.""" + + @pytest.fixture(autouse=True) + def _patch_config(self): + with ( + patch("unraid_mcp.core.client.UNRAID_API_URL", "https://unraid.local/graphql"), + patch("unraid_mcp.core.client.UNRAID_API_KEY", "test-key"), + patch("unraid_mcp.core.client.asyncio.sleep", new_callable=AsyncMock), + ): + yield + + def _make_429_response(self) -> MagicMock: + resp = MagicMock() + resp.status_code = 429 + resp.raise_for_status = MagicMock() + return resp + + def _make_ok_response(self, data: dict) -> MagicMock: + resp = MagicMock() + resp.status_code = 200 + resp.raise_for_status = MagicMock() + resp.json.return_value = {"data": data} + return resp + + async def test_single_429_then_success_retries(self) -> None: + """One 429 followed by a success should return the data.""" + mock_client = AsyncMock() + mock_client.post.side_effect = [ + self._make_429_response(), + self._make_ok_response({"info": {"os": "Unraid"}}), + ] + + with patch("unraid_mcp.core.client.get_http_client", return_value=mock_client): + result = await make_graphql_request("{ info { os } }") + + assert result == {"info": {"os": "Unraid"}} + assert mock_client.post.call_count == 2 + + async def test_two_429s_then_success(self) -> None: + """Two 429s followed by success returns data after 2 retries.""" + mock_client = AsyncMock() + mock_client.post.side_effect = [ + self._make_429_response(), + self._make_429_response(), + self._make_ok_response({"x": 1}), + ] + + with patch("unraid_mcp.core.client.get_http_client", return_value=mock_client): + result = await make_graphql_request("{ x }") + + assert result == {"x": 1} + assert mock_client.post.call_count == 3 + + async def test_three_429s_raises_tool_error(self) -> None: + """Three consecutive 429s (all retries exhausted) raises ToolError.""" + mock_client = AsyncMock() + mock_client.post.side_effect = [ + self._make_429_response(), + self._make_429_response(), + self._make_429_response(), + ] + + with ( + patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), + pytest.raises(ToolError, match="rate limiting"), + ): + await make_graphql_request("{ info }") + + async def test_rate_limit_error_message_advises_wait(self) -> None: + """The ToolError message should tell the user to wait ~10 seconds.""" + mock_client = AsyncMock() + mock_client.post.side_effect = [ + self._make_429_response(), + self._make_429_response(), + self._make_429_response(), + ] + + with ( + patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), + pytest.raises(ToolError, match="10 seconds"), + ): + await make_graphql_request("{ info }") diff --git a/tests/test_docker.py b/tests/test_docker.py index c725979..c3591ff 100644 --- a/tests/test_docker.py +++ b/tests/test_docker.py @@ -175,7 +175,7 @@ class TestDockerActions: "docker": {"updateAllContainers": [{"id": "c1", "state": "running"}]} } tool_fn = _make_tool() - result = await tool_fn(action="update_all") + result = await tool_fn(action="update_all", confirm=True) assert result["success"] is True assert len(result["containers"]) == 1 @@ -271,10 +271,16 @@ class TestDockerMutationFailures: """update_all with no containers to update.""" _mock_graphql.return_value = {"docker": {"updateAllContainers": []}} tool_fn = _make_tool() - result = await tool_fn(action="update_all") + result = await tool_fn(action="update_all", confirm=True) assert result["success"] is True assert result["containers"] == [] + async def test_update_all_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + """update_all is destructive and requires confirm=True.""" + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action="update_all") + async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None: """Mid-operation timeout during a docker mutation.""" diff --git a/tests/test_health.py b/tests/test_health.py index b0e978a..de2f835 100644 --- a/tests/test_health.py +++ b/tests/test_health.py @@ -7,6 +7,7 @@ import pytest from conftest import make_tool_fn from unraid_mcp.core.exceptions import ToolError +from unraid_mcp.tools.health import _safe_display_url @pytest.fixture @@ -139,3 +140,55 @@ class TestHealthActions: finally: # Restore cached modules sys.modules.update(cached) + + +# --------------------------------------------------------------------------- +# _safe_display_url — URL redaction helper +# --------------------------------------------------------------------------- + + +class TestSafeDisplayUrl: + """Verify that _safe_display_url strips credentials/path and preserves scheme+host+port.""" + + def test_none_returns_none(self) -> None: + assert _safe_display_url(None) is None + + def test_empty_string_returns_none(self) -> None: + assert _safe_display_url("") is None + + def test_simple_url_scheme_and_host(self) -> None: + assert _safe_display_url("https://unraid.local/graphql") == "https://unraid.local" + + def test_preserves_port(self) -> None: + assert _safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337" + + def test_strips_path(self) -> None: + result = _safe_display_url("http://unraid.local/some/deep/path?query=1") + assert "path" not in result + assert "query" not in result + + def test_strips_credentials(self) -> None: + result = _safe_display_url("https://user:password@unraid.local/graphql") + assert "user" not in result + assert "password" not in result + assert result == "https://unraid.local" + + def test_strips_query_params(self) -> None: + result = _safe_display_url("http://host.local?token=abc&key=xyz") + assert "token" not in result + assert "abc" not in result + + def test_http_scheme_preserved(self) -> None: + result = _safe_display_url("http://10.0.0.1:8080/api") + assert result == "http://10.0.0.1:8080" + + def test_tailscale_url(self) -> None: + result = _safe_display_url("https://100.118.209.1:31337/graphql") + assert result == "https://100.118.209.1:31337" + + def test_malformed_ipv6_url_returns_unparseable(self) -> None: + """Malformed IPv6 brackets in netloc cause urlparse.hostname to raise ValueError.""" + # urlparse("https://[invalid") parses without error, but accessing .hostname + # raises ValueError: Invalid IPv6 URL — this triggers the except branch. + result = _safe_display_url("https://[invalid") + assert result == "" diff --git a/tests/test_notifications.py b/tests/test_notifications.py index af07977..946fed7 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -151,3 +151,87 @@ class TestNotificationsActions: tool_fn = _make_tool() with pytest.raises(ToolError, match="boom"): await tool_fn(action="overview") + + +class TestNotificationsCreateValidation: + """Tests for importance enum and field length validation added in this PR.""" + + async def test_invalid_importance_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="importance must be one of"): + await tool_fn( + action="create", + title="T", + subject="S", + description="D", + importance="invalid", + ) + + async def test_info_importance_rejected(self, _mock_graphql: AsyncMock) -> None: + """INFO is listed in old docstring examples but rejected by the validator.""" + tool_fn = _make_tool() + with pytest.raises(ToolError, match="importance must be one of"): + await tool_fn( + action="create", + title="T", + subject="S", + description="D", + importance="info", + ) + + async def test_alert_importance_accepted(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "notifications": {"createNotification": {"id": "n:1", "importance": "ALERT"}} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create", title="T", subject="S", description="D", importance="alert" + ) + assert result["success"] is True + + async def test_title_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="title must be at most 200"): + await tool_fn( + action="create", + title="x" * 201, + subject="S", + description="D", + importance="normal", + ) + + async def test_subject_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="subject must be at most 500"): + await tool_fn( + action="create", + title="T", + subject="x" * 501, + description="D", + importance="normal", + ) + + async def test_description_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="description must be at most 2000"): + await tool_fn( + action="create", + title="T", + subject="S", + description="x" * 2001, + importance="normal", + ) + + async def test_title_at_max_accepted(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "notifications": {"createNotification": {"id": "n:1", "importance": "NORMAL"}} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create", + title="x" * 200, + subject="S", + description="D", + importance="normal", + ) + assert result["success"] is True diff --git a/tests/test_rclone.py b/tests/test_rclone.py index 45a0477..caf93cd 100644 --- a/tests/test_rclone.py +++ b/tests/test_rclone.py @@ -100,3 +100,83 @@ class TestRcloneActions: tool_fn = _make_tool() with pytest.raises(ToolError, match="Failed to delete"): await tool_fn(action="delete_remote", name="gdrive", confirm=True) + + +class TestRcloneConfigDataValidation: + """Tests for _validate_config_data security guards.""" + + async def test_path_traversal_in_key_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="disallowed characters"): + await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={"../evil": "value"}, + ) + + async def test_shell_metachar_in_key_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="disallowed characters"): + await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={"key;rm": "value"}, + ) + + async def test_too_many_keys_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="max 50"): + await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={f"key{i}": "v" for i in range(51)}, + ) + + async def test_dict_value_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="string, number, or boolean"): + await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={"nested": {"key": "val"}}, + ) + + async def test_value_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="exceeds max length"): + await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={"key": "x" * 4097}, + ) + + async def test_boolean_value_accepted(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "rclone": {"createRCloneRemote": {"name": "r", "type": "s3"}} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create_remote", + name="r", + provider_type="s3", + config_data={"use_path_style": True}, + ) + assert result["success"] is True + + async def test_int_value_accepted(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "rclone": {"createRCloneRemote": {"name": "r", "type": "sftp"}} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create_remote", + name="r", + provider_type="sftp", + config_data={"port": 22}, + ) + assert result["success"] is True diff --git a/tests/test_storage.py b/tests/test_storage.py index 77d5ea9..eac4c36 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -7,7 +7,7 @@ import pytest from conftest import make_tool_fn from unraid_mcp.core.exceptions import ToolError -from unraid_mcp.core.utils import format_bytes +from unraid_mcp.core.utils import format_bytes, format_kb, safe_get # --- Unit tests for helpers --- @@ -77,6 +77,70 @@ class TestStorageValidation: result = await tool_fn(action="logs", log_path="/var/log/syslog") assert result["content"] == "ok" + async def test_logs_tail_lines_too_large(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="tail_lines must be between"): + await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_001) + + async def test_logs_tail_lines_zero_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="tail_lines must be between"): + await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=0) + + async def test_logs_tail_lines_at_max_accepted(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}} + tool_fn = _make_tool() + result = await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_000) + assert result["content"] == "ok" + + +class TestFormatKb: + def test_none_returns_na(self) -> None: + assert format_kb(None) == "N/A" + + def test_invalid_string_returns_na(self) -> None: + assert format_kb("not-a-number") == "N/A" + + def test_kilobytes_range(self) -> None: + assert format_kb(512) == "512 KB" + + def test_megabytes_range(self) -> None: + assert format_kb(2048) == "2.00 MB" + + def test_gigabytes_range(self) -> None: + assert format_kb(1_048_576) == "1.00 GB" + + def test_terabytes_range(self) -> None: + assert format_kb(1_073_741_824) == "1.00 TB" + + def test_boundary_exactly_1024_kb(self) -> None: + # 1024 KB = 1 MB + assert format_kb(1024) == "1.00 MB" + + +class TestSafeGet: + def test_simple_key_access(self) -> None: + assert safe_get({"a": 1}, "a") == 1 + + def test_nested_key_access(self) -> None: + assert safe_get({"a": {"b": "val"}}, "a", "b") == "val" + + def test_missing_key_returns_none(self) -> None: + assert safe_get({"a": 1}, "missing") is None + + def test_none_intermediate_returns_default(self) -> None: + assert safe_get({"a": None}, "a", "b") is None + + def test_custom_default_returned(self) -> None: + assert safe_get({}, "x", default="fallback") == "fallback" + + def test_non_dict_intermediate_returns_default(self) -> None: + assert safe_get({"a": "string"}, "a", "b") is None + + def test_empty_list_default(self) -> None: + result = safe_get({}, "missing", default=[]) + assert result == [] + class TestStorageActions: async def test_shares(self, _mock_graphql: AsyncMock) -> None: diff --git a/tests/test_subscription_manager.py b/tests/test_subscription_manager.py new file mode 100644 index 0000000..53b5080 --- /dev/null +++ b/tests/test_subscription_manager.py @@ -0,0 +1,131 @@ +"""Tests for _cap_log_content in subscriptions/manager.py. + +_cap_log_content is a pure utility that prevents unbounded memory growth from +log subscription data. It must: return a NEW dict (not mutate), recursively +cap nested 'content' fields, and only truncate when both byte limit and line +limit are exceeded. +""" + +from unittest.mock import patch + +from unraid_mcp.subscriptions.manager import _cap_log_content + + +class TestCapLogContentImmutability: + """The function must return a new dict — never mutate the input.""" + + def test_returns_new_dict(self) -> None: + data = {"key": "value"} + result = _cap_log_content(data) + assert result is not data + + def test_input_not_mutated_on_passthrough(self) -> None: + data = {"content": "short text", "other": "value"} + original_content = data["content"] + _cap_log_content(data) + assert data["content"] == original_content + + def test_input_not_mutated_on_truncation(self) -> None: + # Use small limits so the truncation path is exercised + large_content = "\n".join(f"line {i}" for i in range(200)) + data = {"content": large_content} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + _cap_log_content(data) + # Original data must be unchanged + assert data["content"] == large_content + + +class TestCapLogContentSmallData: + """Content below the byte limit must be returned unchanged.""" + + def test_small_content_unchanged(self) -> None: + data = {"content": "just a few lines\nof log data\n"} + result = _cap_log_content(data) + assert result["content"] == data["content"] + + def test_non_content_keys_passed_through(self) -> None: + data = {"name": "cpu_subscription", "timestamp": "2026-02-18T00:00:00Z"} + result = _cap_log_content(data) + assert result == data + + def test_integer_value_passed_through(self) -> None: + data = {"count": 42, "active": True} + result = _cap_log_content(data) + assert result == data + + +class TestCapLogContentTruncation: + """Content exceeding both byte AND line limits must be truncated to the last N lines.""" + + def test_oversized_content_truncated_to_last_n_lines(self) -> None: + # 200 lines, limit 50 lines, byte limit effectively 0 → should keep last 50 lines + lines = [f"line {i}" for i in range(200)] + data = {"content": "\n".join(lines)} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + result = _cap_log_content(data) + result_lines = result["content"].splitlines() + assert len(result_lines) == 50 + # Must be the LAST 50 lines + assert result_lines[0] == "line 150" + assert result_lines[-1] == "line 199" + + def test_content_with_fewer_lines_than_limit_not_truncated(self) -> None: + """If byte limit exceeded but line count ≤ limit → keep original (not truncated).""" + # 30 lines but byte limit 10 and line limit 50 → 30 < 50 so no truncation + lines = [f"line {i}" for i in range(30)] + data = {"content": "\n".join(lines)} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + result = _cap_log_content(data) + # Original content preserved + assert result["content"] == data["content"] + + def test_non_content_keys_preserved_alongside_truncated_content(self) -> None: + lines = [f"line {i}" for i in range(200)] + data = {"content": "\n".join(lines), "path": "/var/log/syslog", "total_lines": 200} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + result = _cap_log_content(data) + assert result["path"] == "/var/log/syslog" + assert result["total_lines"] == 200 + assert len(result["content"].splitlines()) == 50 + + +class TestCapLogContentNested: + """Nested 'content' fields inside sub-dicts must also be capped recursively.""" + + def test_nested_content_field_capped(self) -> None: + lines = [f"line {i}" for i in range(200)] + data = {"logFile": {"content": "\n".join(lines), "path": "/var/log/syslog"}} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + result = _cap_log_content(data) + assert len(result["logFile"]["content"].splitlines()) == 50 + assert result["logFile"]["path"] == "/var/log/syslog" + + def test_deeply_nested_content_capped(self) -> None: + lines = [f"line {i}" for i in range(200)] + data = {"outer": {"inner": {"content": "\n".join(lines)}}} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), + ): + result = _cap_log_content(data) + assert len(result["outer"]["inner"]["content"].splitlines()) == 50 + + def test_nested_non_content_keys_unaffected(self) -> None: + data = {"metrics": {"cpu": 42.5, "memory": 8192}} + result = _cap_log_content(data) + assert result == data diff --git a/tests/test_subscription_validation.py b/tests/test_subscription_validation.py new file mode 100644 index 0000000..879919f --- /dev/null +++ b/tests/test_subscription_validation.py @@ -0,0 +1,131 @@ +"""Tests for _validate_subscription_query in diagnostics.py. + +Security-critical: this function is the only guard against arbitrary GraphQL +operations (mutations, queries) being sent over the WebSocket subscription channel. +""" + +import pytest + +from unraid_mcp.core.exceptions import ToolError +from unraid_mcp.subscriptions.diagnostics import ( + _ALLOWED_SUBSCRIPTION_NAMES, + _validate_subscription_query, +) + + +class TestValidateSubscriptionQueryAllowed: + """All whitelisted subscription names must be accepted.""" + + @pytest.mark.parametrize("sub_name", sorted(_ALLOWED_SUBSCRIPTION_NAMES)) + def test_all_allowed_names_accepted(self, sub_name: str) -> None: + query = f"subscription {{ {sub_name} {{ data }} }}" + result = _validate_subscription_query(query) + assert result == sub_name + + def test_returns_extracted_subscription_name(self) -> None: + query = "subscription { cpuSubscription { usage } }" + assert _validate_subscription_query(query) == "cpuSubscription" + + def test_leading_whitespace_accepted(self) -> None: + query = " subscription { memorySubscription { free } }" + assert _validate_subscription_query(query) == "memorySubscription" + + def test_multiline_query_accepted(self) -> None: + query = "subscription {\n logFileSubscription {\n content\n }\n}" + assert _validate_subscription_query(query) == "logFileSubscription" + + def test_case_insensitive_subscription_keyword(self) -> None: + """'SUBSCRIPTION' should be accepted (regex uses IGNORECASE).""" + query = "SUBSCRIPTION { cpuSubscription { usage } }" + assert _validate_subscription_query(query) == "cpuSubscription" + + +class TestValidateSubscriptionQueryForbiddenKeywords: + """Queries containing 'mutation' or 'query' as standalone keywords must be rejected.""" + + def test_mutation_keyword_rejected(self) -> None: + query = 'mutation { docker { start(id: "abc") } }' + with pytest.raises(ToolError, match="must be a subscription"): + _validate_subscription_query(query) + + def test_query_keyword_rejected(self) -> None: + query = "query { info { os { platform } } }" + with pytest.raises(ToolError, match="must be a subscription"): + _validate_subscription_query(query) + + def test_mutation_embedded_in_subscription_rejected(self) -> None: + """'mutation' anywhere in the string triggers rejection.""" + query = "subscription { cpuSubscription { mutation data } }" + with pytest.raises(ToolError, match="must be a subscription"): + _validate_subscription_query(query) + + def test_query_embedded_in_subscription_rejected(self) -> None: + query = "subscription { cpuSubscription { query data } }" + with pytest.raises(ToolError, match="must be a subscription"): + _validate_subscription_query(query) + + def test_mutation_case_insensitive_rejection(self) -> None: + query = 'MUTATION { docker { start(id: "abc") } }' + with pytest.raises(ToolError, match="must be a subscription"): + _validate_subscription_query(query) + + def test_mutation_field_identifier_not_rejected(self) -> None: + """'mutationField' as an identifier must NOT be rejected — only standalone 'mutation'.""" + # This tests the \b word boundary in _FORBIDDEN_KEYWORDS + query = "subscription { cpuSubscription { mutationField } }" + # Should not raise — "mutationField" is an identifier, not the keyword + result = _validate_subscription_query(query) + assert result == "cpuSubscription" + + def test_query_field_identifier_not_rejected(self) -> None: + """'queryResult' as an identifier must NOT be rejected.""" + query = "subscription { cpuSubscription { queryResult } }" + result = _validate_subscription_query(query) + assert result == "cpuSubscription" + + +class TestValidateSubscriptionQueryInvalidFormat: + """Queries that don't match the expected subscription format must be rejected.""" + + def test_empty_string_rejected(self) -> None: + with pytest.raises(ToolError, match="must start with 'subscription'"): + _validate_subscription_query("") + + def test_plain_identifier_rejected(self) -> None: + with pytest.raises(ToolError, match="must start with 'subscription'"): + _validate_subscription_query("cpuSubscription { usage }") + + def test_missing_operation_body_rejected(self) -> None: + with pytest.raises(ToolError, match="must start with 'subscription'"): + _validate_subscription_query("subscription") + + def test_subscription_without_field_rejected(self) -> None: + """subscription { } with no field name doesn't match the pattern.""" + with pytest.raises(ToolError, match="must start with 'subscription'"): + _validate_subscription_query("subscription { }") + + +class TestValidateSubscriptionQueryUnknownName: + """Subscription names not in the whitelist must be rejected even if format is valid.""" + + def test_unknown_subscription_name_rejected(self) -> None: + query = "subscription { unknownSubscription { data } }" + with pytest.raises(ToolError, match="not allowed"): + _validate_subscription_query(query) + + def test_error_message_includes_allowed_list(self) -> None: + """Error message must list the allowed subscription names for usability.""" + query = "subscription { badSub { data } }" + with pytest.raises(ToolError, match="Allowed subscriptions"): + _validate_subscription_query(query) + + def test_arbitrary_field_name_rejected(self) -> None: + query = "subscription { users { id email } }" + with pytest.raises(ToolError, match="not allowed"): + _validate_subscription_query(query) + + def test_close_but_not_whitelisted_rejected(self) -> None: + """'cpu' without 'Subscription' suffix is not in the allow-list.""" + query = "subscription { cpu { usage } }" + with pytest.raises(ToolError, match="not allowed"): + _validate_subscription_query(query) diff --git a/unraid_mcp/core/client.py b/unraid_mcp/core/client.py index 9c6369b..ea568cf 100644 --- a/unraid_mcp/core/client.py +++ b/unraid_mcp/core/client.py @@ -8,7 +8,7 @@ import asyncio import hashlib import json import time -from typing import Any +from typing import Any, Final import httpx @@ -23,20 +23,22 @@ from ..config.settings import ( from ..core.exceptions import ToolError -# Sensitive keys to redact from debug logs -_SENSITIVE_KEYS = { - "password", - "key", - "secret", - "token", - "apikey", - "authorization", - "cookie", - "session", - "credential", - "passphrase", - "jwt", -} +# Sensitive keys to redact from debug logs (frozenset — immutable, Final — no accidental reassignment) +_SENSITIVE_KEYS: Final[frozenset[str]] = frozenset( + { + "password", + "key", + "secret", + "token", + "apikey", + "authorization", + "cookie", + "session", + "credential", + "passphrase", + "jwt", + } +) def _is_sensitive_key(key: str) -> bool: @@ -80,16 +82,9 @@ def get_timeout_for_operation(profile: str) -> httpx.Timeout: # Global connection pool (module-level singleton) +# Python 3.12+ asyncio.Lock() is safe at module level — no running event loop required _http_client: httpx.AsyncClient | None = None -_client_lock: asyncio.Lock | None = None - - -def _get_client_lock() -> asyncio.Lock: - """Get or create the client lock (lazy init to avoid event loop issues).""" - global _client_lock - if _client_lock is None: - _client_lock = asyncio.Lock() - return _client_lock +_client_lock: Final[asyncio.Lock] = asyncio.Lock() class _RateLimiter: @@ -103,12 +98,8 @@ class _RateLimiter: self.tokens = float(max_tokens) self.refill_rate = refill_rate # tokens per second self.last_refill = time.monotonic() - self._lock: asyncio.Lock | None = None - - def _get_lock(self) -> asyncio.Lock: - if self._lock is None: - self._lock = asyncio.Lock() - return self._lock + # asyncio.Lock() is safe to create at __init__ time (Python 3.12+) + self._lock: Final[asyncio.Lock] = asyncio.Lock() def _refill(self) -> None: """Refill tokens based on elapsed time.""" @@ -120,7 +111,7 @@ class _RateLimiter: async def acquire(self) -> None: """Consume one token, waiting if necessary for refill.""" while True: - async with self._get_lock(): + async with self._lock: self._refill() if self.tokens >= 1: self.tokens -= 1 @@ -266,7 +257,7 @@ async def get_http_client() -> httpx.AsyncClient: return client # Slow-path: acquire lock for initialization - async with _get_client_lock(): + async with _client_lock: if _http_client is None or _http_client.is_closed: _http_client = await _create_http_client() logger.info( @@ -279,7 +270,7 @@ async def close_http_client() -> None: """Close the shared HTTP client (call on server shutdown).""" global _http_client - async with _get_client_lock(): + async with _client_lock: if _http_client is not None: await _http_client.aclose() _http_client = None @@ -361,6 +352,14 @@ async def make_graphql_request( if response is None: # pragma: no cover — guaranteed by loop raise ToolError("No response received after retry attempts") + + # Provide a clear message when all retries are exhausted on 429 + if response.status_code == 429: + logger.error("Rate limit (429) persisted after 3 retries — request aborted") + raise ToolError( + "Unraid API is rate limiting requests. Wait ~10 seconds before retrying." + ) + response.raise_for_status() # Raise an exception for HTTP error codes 4xx/5xx response_data = response.json() diff --git a/unraid_mcp/core/exceptions.py b/unraid_mcp/core/exceptions.py index c5b99cf..34d84d1 100644 --- a/unraid_mcp/core/exceptions.py +++ b/unraid_mcp/core/exceptions.py @@ -6,7 +6,7 @@ throughout the application, with proper integration to FastMCP's error system. import contextlib import logging -from collections.abc import Generator +from collections.abc import Iterator from fastmcp.exceptions import ToolError as FastMCPToolError @@ -28,11 +28,12 @@ def tool_error_handler( tool_name: str, action: str, logger: logging.Logger, -) -> Generator[None]: +) -> Iterator[None]: """Context manager that standardizes tool error handling. - Re-raises ToolError as-is. Catches all other exceptions, logs them - with full traceback, and wraps them in ToolError with a descriptive message. + Re-raises ToolError as-is. Gives TimeoutError a descriptive message. + Catches all other exceptions, logs them with full traceback, and wraps them + in ToolError with a descriptive message. Args: tool_name: The tool name for error messages (e.g., "docker", "vm"). @@ -43,6 +44,14 @@ def tool_error_handler( yield except ToolError: raise + except TimeoutError as e: + logger.error( + f"Timeout in unraid_{tool_name} action={action}: request exceeded time limit", + exc_info=True, + ) + raise ToolError( + f"Request timed out executing {tool_name}/{action}. The Unraid API did not respond in time." + ) from e except Exception as e: logger.error(f"Error in unraid_{tool_name} action={action}: {e}", exc_info=True) raise ToolError(f"Failed to execute {tool_name}/{action}: {e!s}") from e diff --git a/unraid_mcp/subscriptions/diagnostics.py b/unraid_mcp/subscriptions/diagnostics.py index 88da6e8..f72d010 100644 --- a/unraid_mcp/subscriptions/diagnostics.py +++ b/unraid_mcp/subscriptions/diagnostics.py @@ -37,8 +37,10 @@ _ALLOWED_SUBSCRIPTION_NAMES = frozenset( } ) -# Pattern: must start with "subscription", contain only a known subscription name, -# and not contain mutation/query keywords or semicolons (prevents injection). +# Pattern: must start with "subscription" and contain only a known subscription name. +# _FORBIDDEN_KEYWORDS rejects any query that contains standalone "mutation" or "query" +# as distinct words. Word boundaries (\b) ensure "mutationField"-style identifiers are +# not rejected — only bare "mutation" or "query" operation keywords are blocked. _SUBSCRIPTION_NAME_PATTERN = re.compile(r"^\s*subscription\b[^{]*\{\s*(\w+)", re.IGNORECASE) _FORBIDDEN_KEYWORDS = re.compile(r"\b(mutation|query)\b", re.IGNORECASE) diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index 75b948d..0416e2e 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -32,12 +32,17 @@ _STABLE_CONNECTION_SECONDS = 30 def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: """Cap log content in subscription data to prevent unbounded memory growth. - If the data contains a 'content' field (from log subscriptions) that exceeds - size limits, truncate to the most recent _MAX_RESOURCE_DATA_LINES lines. + Returns a new dict — does NOT mutate the input. If any nested 'content' + field (from log subscriptions) exceeds the byte limit, truncate it to the + most recent _MAX_RESOURCE_DATA_LINES lines. + + Note: single lines larger than _MAX_RESOURCE_DATA_BYTES are not split and + will still be stored at full size; only multi-line content is truncated. """ + result: dict[str, Any] = {} for key, value in data.items(): if isinstance(value, dict): - data[key] = _cap_log_content(value) + result[key] = _cap_log_content(value) elif ( key == "content" and isinstance(value, str) @@ -50,8 +55,12 @@ def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: f"[RESOURCE] Capped log content from {len(lines)} to " f"{_MAX_RESOURCE_DATA_LINES} lines ({len(value)} -> {len(truncated)} chars)" ) - data[key] = truncated - return data + result[key] = truncated + else: + result[key] = value + else: + result[key] = value + return result class SubscriptionManager: @@ -355,11 +364,13 @@ class SubscriptionManager: if isinstance(payload["data"], dict) else payload["data"] ) - self.resource_data[subscription_name] = SubscriptionData( + new_entry = SubscriptionData( data=capped_data, last_updated=datetime.now(UTC), subscription_type=subscription_name, ) + async with self.subscription_lock: + self.resource_data[subscription_name] = new_entry logger.debug( f"[RESOURCE:{subscription_name}] Resource data updated successfully" ) @@ -484,6 +495,16 @@ class SubscriptionManager: self.connection_states[subscription_name] = "reconnecting" await asyncio.sleep(retry_delay) + # The while loop exited (via break or max_retries exceeded). + # Remove from active_subscriptions so start_subscription() can restart it. + async with self.subscription_lock: + self.active_subscriptions.pop(subscription_name, None) + logger.info( + f"[SUBSCRIPTION:{subscription_name}] Subscription loop ended — " + f"removed from active_subscriptions. Final state: " + f"{self.connection_states.get(subscription_name, 'unknown')}" + ) + async def get_resource_data(self, resource_name: str) -> dict[str, Any] | None: """Get current resource data with enhanced logging.""" logger.debug(f"[RESOURCE:{resource_name}] Resource data requested") diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index 0568f64..cd31e7b 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -99,8 +99,7 @@ MUTATIONS: dict[str, str] = { """, } -DESTRUCTIVE_ACTIONS = {"remove"} -_MUTATION_ACTIONS = {"start", "stop", "restart", "pause", "unpause", "remove", "update"} +DESTRUCTIVE_ACTIONS = {"remove", "update_all"} # NOTE (Code-M-07): "details" and "logs" are listed here because they require a # container_id parameter, but unlike mutations they use fuzzy name matching (not # strict). This is intentional: read-only queries are safe with fuzzy matching. diff --git a/unraid_mcp/tools/health.py b/unraid_mcp/tools/health.py index f378e6d..dc34559 100644 --- a/unraid_mcp/tools/health.py +++ b/unraid_mcp/tools/health.py @@ -37,8 +37,8 @@ def _safe_display_url(url: str | None) -> str | None: if parsed.port: return f"{parsed.scheme}://{host}:{parsed.port}" return f"{parsed.scheme}://{host}" - except Exception: - # If parsing fails, show nothing rather than leaking the raw URL + except ValueError: + # urlparse raises ValueError for invalid URLs (e.g. contains control chars) return "" @@ -235,9 +235,9 @@ def _analyze_subscription_status( """Analyze subscription status dict, returning error count and connection issues. This is the canonical implementation of subscription status analysis. - TODO: subscriptions/diagnostics.py (lines 168-182) duplicates this logic. - That module should be refactored to call this helper once file ownership - allows cross-agent edits. See Code-H05. + TODO: subscriptions/diagnostics.py has a similar status-analysis pattern + in diagnose_subscriptions(). That module could import and call this helper + directly to avoid divergence. See Code-H05. Args: status: Dict of subscription name -> status info from get_subscription_status(). From 348f4149a5711c2e7911f35fc4cb02e6be330f40 Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Thu, 19 Feb 2026 01:56:23 -0500 Subject: [PATCH 03/34] fix: address PR review threads - test assertions, ruff violations, format_kb consistency Resolves review threads: - PRRT_kwDOO6Hdxs5vNroH (Thread 36): tests now verify generic ToolError message instead of raw exception text (security: no sensitive data in user-facing errors) - PRRT_kwDOO6Hdxs5vNuYg (Thread 14): format_kb KB branch now uses :.2f like all other branches (consistency fix) - I001/F841/PERF401: fix ruff violations in http_layer, integration, safety tests Changes: - tests/test_array.py: match "Failed to execute array/parity_status" (not raw error) - tests/test_keys.py: match "Failed to execute keys/list" (not raw error) - tests/test_notifications.py: match "Failed to execute notifications/overview" (not raw error) - tests/test_storage.py: update format_kb assertion to "512.00 KB" (:.2f format) - tests/http_layer/test_request_construction.py: remove unused result var (F841) + fix import sort (I001) - tests/safety/test_destructive_guards.py: use list.extend (PERF401) + fix import sort - unraid_mcp/core/utils.py: format_kb returns f"{k:.2f} KB" for sub-MB values Co-authored-by: @coderabbitai Co-authored-by: @cubic-dev-ai Co-authored-by: @copilot-pull-request-reviewer --- tests/http_layer/test_request_construction.py | 3 ++- tests/safety/test_destructive_guards.py | 16 +++++++++------- tests/test_array.py | 2 +- tests/test_keys.py | 2 +- tests/test_notifications.py | 2 +- tests/test_storage.py | 10 +++++++++- unraid_mcp/core/utils.py | 3 ++- 7 files changed, 25 insertions(+), 13 deletions(-) diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index 8ac7ad1..766fb80 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -19,6 +19,7 @@ from tests.conftest import make_tool_fn from unraid_mcp.core.client import DEFAULT_TIMEOUT, DISK_TIMEOUT, make_graphql_request from unraid_mcp.core.exceptions import ToolError + # --------------------------------------------------------------------------- # Shared fixtures # --------------------------------------------------------------------------- @@ -582,7 +583,7 @@ class TestVMToolRequests: return_value=_graphql_response({"vm": {"stop": True}}) ) tool = self._get_tool() - result = await tool(action="stop", vm_id="vm-456") + await tool(action="stop", vm_id="vm-456") body = _extract_request_body(route.calls.last.request) assert "StopVM" in body["query"] assert body["variables"] == {"id": "vm-456"} diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py index 43bf230..115849f 100644 --- a/tests/safety/test_destructive_guards.py +++ b/tests/safety/test_destructive_guards.py @@ -10,6 +10,10 @@ from unittest.mock import AsyncMock, patch import pytest +# Centralized import for make_tool_fn helper +# conftest.py sits in tests/ and is importable without __init__.py +from conftest import make_tool_fn + from unraid_mcp.core.exceptions import ToolError # Import DESTRUCTIVE_ACTIONS sets from every tool module that defines one @@ -24,10 +28,6 @@ from unraid_mcp.tools.rclone import MUTATIONS as RCLONE_MUTATIONS from unraid_mcp.tools.virtualization import DESTRUCTIVE_ACTIONS as VM_DESTRUCTIVE from unraid_mcp.tools.virtualization import MUTATIONS as VM_MUTATIONS -# Centralized import for make_tool_fn helper -# conftest.py sits in tests/ and is importable without __init__.py -from conftest import make_tool_fn - # --------------------------------------------------------------------------- # Known destructive actions registry (ground truth for this audit) @@ -126,9 +126,11 @@ class TestDestructiveActionRegistries: missing: list[str] = [] for tool_key, mutations in all_mutations.items(): destructive = all_destructive[tool_key] - for action_name in mutations: - if ("delete" in action_name or "remove" in action_name) and action_name not in destructive: - missing.append(f"{tool_key}/{action_name}") + missing.extend( + f"{tool_key}/{action_name}" + for action_name in mutations + if ("delete" in action_name or "remove" in action_name) and action_name not in destructive + ) assert not missing, ( f"Mutations with 'delete'/'remove' not in DESTRUCTIVE_ACTIONS: {missing}" ) diff --git a/tests/test_array.py b/tests/test_array.py index 5f22c6a..8717d78 100644 --- a/tests/test_array.py +++ b/tests/test_array.py @@ -84,7 +84,7 @@ class TestArrayActions: async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = RuntimeError("disk error") tool_fn = _make_tool() - with pytest.raises(ToolError, match="disk error"): + with pytest.raises(ToolError, match="Failed to execute array/parity_status"): await tool_fn(action="parity_status") diff --git a/tests/test_keys.py b/tests/test_keys.py index 3d7ab5e..2236fbe 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -100,5 +100,5 @@ class TestKeysActions: async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = RuntimeError("connection lost") tool_fn = _make_tool() - with pytest.raises(ToolError, match="connection lost"): + with pytest.raises(ToolError, match="Failed to execute keys/list"): await tool_fn(action="list") diff --git a/tests/test_notifications.py b/tests/test_notifications.py index 946fed7..40dae42 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -149,7 +149,7 @@ class TestNotificationsActions: async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = RuntimeError("boom") tool_fn = _make_tool() - with pytest.raises(ToolError, match="boom"): + with pytest.raises(ToolError, match="Failed to execute notifications/overview"): await tool_fn(action="overview") diff --git a/tests/test_storage.py b/tests/test_storage.py index eac4c36..f86e720 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -93,6 +93,14 @@ class TestStorageValidation: result = await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_000) assert result["content"] == "ok" + async def test_non_logs_action_ignores_tail_lines_validation( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"shares": []} + tool_fn = _make_tool() + result = await tool_fn(action="shares", tail_lines=0) + assert result["shares"] == [] + class TestFormatKb: def test_none_returns_na(self) -> None: @@ -102,7 +110,7 @@ class TestFormatKb: assert format_kb("not-a-number") == "N/A" def test_kilobytes_range(self) -> None: - assert format_kb(512) == "512 KB" + assert format_kb(512) == "512.00 KB" def test_megabytes_range(self) -> None: assert format_kb(2048) == "2.00 MB" diff --git a/unraid_mcp/core/utils.py b/unraid_mcp/core/utils.py index 1db6dc4..5b5ec9b 100644 --- a/unraid_mcp/core/utils.py +++ b/unraid_mcp/core/utils.py @@ -13,6 +13,7 @@ def safe_get(data: dict[str, Any], *keys: str, default: Any = None) -> Any: Returns: The value at the end of the key chain, or default if unreachable. + Explicit ``None`` values at the final key also return ``default``. """ current = data for key in keys: @@ -65,4 +66,4 @@ def format_kb(k: Any) -> str: return f"{k / (1024 * 1024):.2f} GB" if k >= 1024: return f"{k / 1024:.2f} MB" - return f"{k} KB" + return f"{k:.2f} KB" From 1751bc29845d61ba084990c488f32a9894f44c2c Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Thu, 19 Feb 2026 02:23:04 -0500 Subject: [PATCH 04/34] fix: apply all PR review agent findings (silent failures, type safety, test gaps) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses issues found by 4 parallel review agents (code-reviewer, silent-failure-hunter, type-design-analyzer, pr-test-analyzer). Source fixes: - core/utils.py: add public safe_display_url() (moved from tools/health.py) - core/client.py: rename _redact_sensitive → redact_sensitive (public API) - core/types.py: add SubscriptionData.__post_init__ for tz-aware datetime enforcement; remove 6 unused type aliases (SystemHealth, APIResponse, etc.) - subscriptions/manager.py: add exc_info=True to both except-Exception blocks; add except ValueError break-on-config-error before retry loop; import redact_sensitive by new public name - subscriptions/resources.py: re-raise in autostart_subscriptions() so ensure_subscriptions_started() doesn't permanently set _subscriptions_started - subscriptions/diagnostics.py: except ToolError: raise before broad except; use safe_display_url() instead of raw URL slice - tools/health.py: move _safe_display_url to core/utils; add exc_info=True; raise ToolError (not return dict) on ImportError - tools/info.py: use get_args(INFO_ACTIONS) instead of INFO_ACTIONS.__args__ - tools/{array,docker,keys,notifications,rclone,storage,virtualization}.py: add Literal-vs-ALL_ACTIONS sync check at import time Test fixes: - test_health.py: import safe_display_url from core.utils; update test_diagnose_import_error_internal to expect ToolError (not error dict) - test_storage.py: add 3 safe_get tests for zero/False/empty-string values - test_subscription_manager.py: add TestCapLogContentSingleMassiveLine (2 tests) - test_client.py: rename _redact_sensitive → redact_sensitive; add tests for new sensitive keys and is_cacheable explicit-keyword form --- tests/integration/test_subscriptions.py | 1 + tests/test_client.py | 51 ++++++++++++++++------- tests/test_docker.py | 23 ++++++++++- tests/test_health.py | 48 +++++++++++----------- tests/test_info.py | 5 ++- tests/test_rclone.py | 1 - tests/test_storage.py | 9 +++++ tests/test_subscription_manager.py | 51 +++++++++++++++++------ unraid_mcp/__init__.py | 13 ++---- unraid_mcp/config/logging.py | 4 +- unraid_mcp/config/settings.py | 25 +++++++----- unraid_mcp/core/client.py | 18 ++++++--- unraid_mcp/core/exceptions.py | 11 +++-- unraid_mcp/core/types.py | 37 ++++------------- unraid_mcp/core/utils.py | 20 +++++++++ unraid_mcp/subscriptions/diagnostics.py | 5 ++- unraid_mcp/subscriptions/manager.py | 54 ++++++++++++++++++------- unraid_mcp/subscriptions/resources.py | 1 + unraid_mcp/tools/array.py | 10 ++++- unraid_mcp/tools/docker.py | 36 +++++++++++------ unraid_mcp/tools/health.py | 42 +++++++------------ unraid_mcp/tools/info.py | 11 ++--- unraid_mcp/tools/keys.py | 10 ++++- unraid_mcp/tools/notifications.py | 10 ++++- unraid_mcp/tools/rclone.py | 12 +++++- unraid_mcp/tools/storage.py | 12 +++++- unraid_mcp/tools/virtualization.py | 10 ++++- unraid_mcp/version.py | 11 +++++ 28 files changed, 354 insertions(+), 187 deletions(-) create mode 100644 unraid_mcp/version.py diff --git a/tests/integration/test_subscriptions.py b/tests/integration/test_subscriptions.py index 22e3954..755bfd7 100644 --- a/tests/integration/test_subscriptions.py +++ b/tests/integration/test_subscriptions.py @@ -16,6 +16,7 @@ import websockets.exceptions from unraid_mcp.subscriptions.manager import SubscriptionManager + pytestmark = pytest.mark.integration diff --git a/tests/test_client.py b/tests/test_client.py index c90f797..904409c 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -12,9 +12,9 @@ from unraid_mcp.core.client import ( DISK_TIMEOUT, _QueryCache, _RateLimiter, - _redact_sensitive, is_idempotent_error, make_graphql_request, + redact_sensitive, ) from unraid_mcp.core.exceptions import ToolError @@ -60,7 +60,7 @@ class TestIsIdempotentError: # --------------------------------------------------------------------------- -# _redact_sensitive +# redact_sensitive # --------------------------------------------------------------------------- @@ -69,36 +69,36 @@ class TestRedactSensitive: def test_flat_dict(self) -> None: data = {"username": "admin", "password": "hunter2", "host": "10.0.0.1"} - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result["username"] == "admin" assert result["password"] == "***" assert result["host"] == "10.0.0.1" def test_nested_dict(self) -> None: data = {"config": {"apiKey": "abc123", "url": "http://host"}} - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result["config"]["apiKey"] == "***" assert result["config"]["url"] == "http://host" def test_list_of_dicts(self) -> None: data = [{"token": "t1"}, {"name": "safe"}] - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result[0]["token"] == "***" assert result[1]["name"] == "safe" def test_deeply_nested(self) -> None: data = {"a": {"b": {"c": {"secret": "deep"}}}} - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result["a"]["b"]["c"]["secret"] == "***" def test_non_dict_passthrough(self) -> None: - assert _redact_sensitive("plain_string") == "plain_string" - assert _redact_sensitive(42) == 42 - assert _redact_sensitive(None) is None + assert redact_sensitive("plain_string") == "plain_string" + assert redact_sensitive(42) == 42 + assert redact_sensitive(None) is None def test_case_insensitive_keys(self) -> None: data = {"Password": "p1", "TOKEN": "t1", "ApiKey": "k1", "Secret": "s1", "Key": "x1"} - result = _redact_sensitive(data) + result = redact_sensitive(data) for v in result.values(): assert v == "***" @@ -112,7 +112,7 @@ class TestRedactSensitive: "username": "safe", "host": "safe", } - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result["user_password"] == "***" assert result["api_key_value"] == "***" assert result["auth_token_expiry"] == "***" @@ -122,12 +122,26 @@ class TestRedactSensitive: def test_mixed_list_content(self) -> None: data = [{"key": "val"}, "string", 123, [{"token": "inner"}]] - result = _redact_sensitive(data) + result = redact_sensitive(data) assert result[0]["key"] == "***" assert result[1] == "string" assert result[2] == 123 assert result[3][0]["token"] == "***" + def test_new_sensitive_keys_are_redacted(self) -> None: + """PR-added keys: authorization, cookie, session, credential, passphrase, jwt.""" + data = { + "authorization": "Bearer token123", + "cookie": "session=abc", + "jwt": "eyJ...", + "credential": "secret_cred", + "passphrase": "hunter2", + "session": "sess_id", + } + result = redact_sensitive(data) + for key, val in result.items(): + assert val == "***", f"Key '{key}' was not redacted" + # --------------------------------------------------------------------------- # Timeout constants @@ -347,7 +361,7 @@ class TestMakeGraphQLRequestErrors: with ( patch("unraid_mcp.core.client.get_http_client", return_value=mock_client), - pytest.raises(ToolError, match="invalid response.*not valid JSON"), + pytest.raises(ToolError, match=r"invalid response.*not valid JSON"), ): await make_graphql_request("{ info }") @@ -481,7 +495,7 @@ class TestRateLimiter: limiter = _RateLimiter(max_tokens=10, refill_rate=1.0) initial = limiter.tokens await limiter.acquire() - assert limiter.tokens == initial - 1 + assert limiter.tokens == pytest.approx(initial - 1, abs=1e-3) async def test_acquire_succeeds_when_tokens_available(self) -> None: limiter = _RateLimiter(max_tokens=5, refill_rate=1.0) @@ -596,6 +610,15 @@ class TestQueryCache: """Queries that start with 'mutation' after whitespace are not cacheable.""" assert _QueryCache.is_cacheable(" mutation { ... }") is False + def test_is_cacheable_with_explicit_query_keyword(self) -> None: + """Operation names after explicit 'query' keyword must be recognized.""" + assert _QueryCache.is_cacheable("query GetNetworkConfig { network { name } }") is True + assert _QueryCache.is_cacheable("query GetOwner { owner { name } }") is True + + def test_is_cacheable_anonymous_query_returns_false(self) -> None: + """Anonymous 'query { ... }' has no operation name — must not be cached.""" + assert _QueryCache.is_cacheable("query { network { name } }") is False + def test_expired_entry_removed_from_store(self) -> None: """Accessing an expired entry should remove it from the internal store.""" cache = _QueryCache() diff --git a/tests/test_docker.py b/tests/test_docker.py index c3591ff..5b045ed 100644 --- a/tests/test_docker.py +++ b/tests/test_docker.py @@ -80,6 +80,14 @@ class TestDockerValidation: with pytest.raises(ToolError, match="network_id"): await tool_fn(action="network_details") + async def test_non_logs_action_ignores_tail_lines_validation( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"docker": {"containers": []}} + tool_fn = _make_tool() + result = await tool_fn(action="list", tail_lines=0) + assert result["containers"] == [] + class TestDockerActions: async def test_list(self, _mock_graphql: AsyncMock) -> None: @@ -224,9 +232,22 @@ class TestDockerActions: async def test_generic_exception_wraps_in_tool_error(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = RuntimeError("unexpected failure") tool_fn = _make_tool() - with pytest.raises(ToolError, match="unexpected failure"): + with pytest.raises(ToolError, match="Failed to execute docker/list"): await tool_fn(action="list") + async def test_short_id_prefix_ambiguous_rejected(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "docker": { + "containers": [ + {"id": "abcdef1234560000000000000000000000000000000000000000000000000000:local", "names": ["plex"]}, + {"id": "abcdef1234561111111111111111111111111111111111111111111111111111:local", "names": ["sonarr"]}, + ] + } + } + tool_fn = _make_tool() + with pytest.raises(ToolError, match="ambiguous"): + await tool_fn(action="logs", container_id="abcdef123456") + class TestDockerMutationFailures: """Tests for mutation responses that indicate failure or unexpected shapes.""" diff --git a/tests/test_health.py b/tests/test_health.py index de2f835..8b58732 100644 --- a/tests/test_health.py +++ b/tests/test_health.py @@ -7,7 +7,7 @@ import pytest from conftest import make_tool_fn from unraid_mcp.core.exceptions import ToolError -from unraid_mcp.tools.health import _safe_display_url +from unraid_mcp.core.utils import safe_display_url @pytest.fixture @@ -100,7 +100,7 @@ class TestHealthActions: "unraid_mcp.tools.health._diagnose_subscriptions", side_effect=RuntimeError("broken"), ), - pytest.raises(ToolError, match="broken"), + pytest.raises(ToolError, match="Failed to execute health/diagnose"), ): await tool_fn(action="diagnose") @@ -115,7 +115,7 @@ class TestHealthActions: assert "cpu_sub" in result async def test_diagnose_import_error_internal(self) -> None: - """_diagnose_subscriptions catches ImportError and returns error dict.""" + """_diagnose_subscriptions raises ToolError when subscription modules are unavailable.""" import sys from unraid_mcp.tools.health import _diagnose_subscriptions @@ -127,16 +127,18 @@ class TestHealthActions: try: # Replace the modules with objects that raise ImportError on access - with patch.dict( - sys.modules, - { - "unraid_mcp.subscriptions": None, - "unraid_mcp.subscriptions.manager": None, - "unraid_mcp.subscriptions.resources": None, - }, + with ( + patch.dict( + sys.modules, + { + "unraid_mcp.subscriptions": None, + "unraid_mcp.subscriptions.manager": None, + "unraid_mcp.subscriptions.resources": None, + }, + ), + pytest.raises(ToolError, match="Subscription modules not available"), ): - result = await _diagnose_subscriptions() - assert "error" in result + await _diagnose_subscriptions() finally: # Restore cached modules sys.modules.update(cached) @@ -148,47 +150,47 @@ class TestHealthActions: class TestSafeDisplayUrl: - """Verify that _safe_display_url strips credentials/path and preserves scheme+host+port.""" + """Verify that safe_display_url strips credentials/path and preserves scheme+host+port.""" def test_none_returns_none(self) -> None: - assert _safe_display_url(None) is None + assert safe_display_url(None) is None def test_empty_string_returns_none(self) -> None: - assert _safe_display_url("") is None + assert safe_display_url("") is None def test_simple_url_scheme_and_host(self) -> None: - assert _safe_display_url("https://unraid.local/graphql") == "https://unraid.local" + assert safe_display_url("https://unraid.local/graphql") == "https://unraid.local" def test_preserves_port(self) -> None: - assert _safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337" + assert safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337" def test_strips_path(self) -> None: - result = _safe_display_url("http://unraid.local/some/deep/path?query=1") + result = safe_display_url("http://unraid.local/some/deep/path?query=1") assert "path" not in result assert "query" not in result def test_strips_credentials(self) -> None: - result = _safe_display_url("https://user:password@unraid.local/graphql") + result = safe_display_url("https://user:password@unraid.local/graphql") assert "user" not in result assert "password" not in result assert result == "https://unraid.local" def test_strips_query_params(self) -> None: - result = _safe_display_url("http://host.local?token=abc&key=xyz") + result = safe_display_url("http://host.local?token=abc&key=xyz") assert "token" not in result assert "abc" not in result def test_http_scheme_preserved(self) -> None: - result = _safe_display_url("http://10.0.0.1:8080/api") + result = safe_display_url("http://10.0.0.1:8080/api") assert result == "http://10.0.0.1:8080" def test_tailscale_url(self) -> None: - result = _safe_display_url("https://100.118.209.1:31337/graphql") + result = safe_display_url("https://100.118.209.1:31337/graphql") assert result == "https://100.118.209.1:31337" def test_malformed_ipv6_url_returns_unparseable(self) -> None: """Malformed IPv6 brackets in netloc cause urlparse.hostname to raise ValueError.""" # urlparse("https://[invalid") parses without error, but accessing .hostname # raises ValueError: Invalid IPv6 URL — this triggers the except branch. - result = _safe_display_url("https://[invalid") + result = safe_display_url("https://[invalid") assert result == "" diff --git a/tests/test_info.py b/tests/test_info.py index 02fc3ea..2f1c77c 100644 --- a/tests/test_info.py +++ b/tests/test_info.py @@ -186,7 +186,7 @@ class TestUnraidInfoTool: async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = RuntimeError("unexpected") tool_fn = _make_tool() - with pytest.raises(ToolError, match="unexpected"): + with pytest.raises(ToolError, match="Failed to execute info/online"): await tool_fn(action="online") async def test_metrics(self, _mock_graphql: AsyncMock) -> None: @@ -201,6 +201,7 @@ class TestUnraidInfoTool: _mock_graphql.return_value = {"services": [{"name": "docker", "state": "running"}]} tool_fn = _make_tool() result = await tool_fn(action="services") + assert "services" in result assert len(result["services"]) == 1 assert result["services"][0]["name"] == "docker" @@ -225,6 +226,7 @@ class TestUnraidInfoTool: } tool_fn = _make_tool() result = await tool_fn(action="servers") + assert "servers" in result assert len(result["servers"]) == 1 assert result["servers"][0]["name"] == "tower" @@ -248,6 +250,7 @@ class TestUnraidInfoTool: } tool_fn = _make_tool() result = await tool_fn(action="ups_devices") + assert "ups_devices" in result assert len(result["ups_devices"]) == 1 assert result["ups_devices"][0]["model"] == "APC" diff --git a/tests/test_rclone.py b/tests/test_rclone.py index caf93cd..c5a7103 100644 --- a/tests/test_rclone.py +++ b/tests/test_rclone.py @@ -19,7 +19,6 @@ def _make_tool(): return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone") -@pytest.mark.usefixtures("_mock_graphql") class TestRcloneValidation: async def test_delete_requires_confirm(self) -> None: tool_fn = _make_tool() diff --git a/tests/test_storage.py b/tests/test_storage.py index f86e720..5f4ca7e 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -149,6 +149,15 @@ class TestSafeGet: result = safe_get({}, "missing", default=[]) assert result == [] + def test_zero_value_not_replaced_by_default(self) -> None: + assert safe_get({"temp": 0}, "temp", default="N/A") == 0 + + def test_false_value_not_replaced_by_default(self) -> None: + assert safe_get({"active": False}, "active", default=True) is False + + def test_empty_string_not_replaced_by_default(self) -> None: + assert safe_get({"name": ""}, "name", default="unknown") == "" + class TestStorageActions: async def test_shares(self, _mock_graphql: AsyncMock) -> None: diff --git a/tests/test_subscription_manager.py b/tests/test_subscription_manager.py index 53b5080..3c96794 100644 --- a/tests/test_subscription_manager.py +++ b/tests/test_subscription_manager.py @@ -60,8 +60,8 @@ class TestCapLogContentSmallData: class TestCapLogContentTruncation: """Content exceeding both byte AND line limits must be truncated to the last N lines.""" - def test_oversized_content_truncated_to_last_n_lines(self) -> None: - # 200 lines, limit 50 lines, byte limit effectively 0 → should keep last 50 lines + def test_oversized_content_truncated_and_byte_capped(self) -> None: + # 200 lines, tiny byte limit: must keep recent content within byte cap. lines = [f"line {i}" for i in range(200)] data = {"content": "\n".join(lines)} with ( @@ -70,14 +70,13 @@ class TestCapLogContentTruncation: ): result = _cap_log_content(data) result_lines = result["content"].splitlines() - assert len(result_lines) == 50 - # Must be the LAST 50 lines - assert result_lines[0] == "line 150" + assert len(result["content"].encode("utf-8", errors="replace")) <= 10 + # Must keep the most recent line suffix. assert result_lines[-1] == "line 199" - def test_content_with_fewer_lines_than_limit_not_truncated(self) -> None: - """If byte limit exceeded but line count ≤ limit → keep original (not truncated).""" - # 30 lines but byte limit 10 and line limit 50 → 30 < 50 so no truncation + def test_content_with_fewer_lines_than_limit_still_honors_byte_cap(self) -> None: + """If byte limit is exceeded, output must still be capped even with few lines.""" + # 30 lines, byte limit 10, line limit 50 -> must cap bytes regardless of line count lines = [f"line {i}" for i in range(30)] data = {"content": "\n".join(lines)} with ( @@ -85,8 +84,7 @@ class TestCapLogContentTruncation: patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), ): result = _cap_log_content(data) - # Original content preserved - assert result["content"] == data["content"] + assert len(result["content"].encode("utf-8", errors="replace")) <= 10 def test_non_content_keys_preserved_alongside_truncated_content(self) -> None: lines = [f"line {i}" for i in range(200)] @@ -98,7 +96,7 @@ class TestCapLogContentTruncation: result = _cap_log_content(data) assert result["path"] == "/var/log/syslog" assert result["total_lines"] == 200 - assert len(result["content"].splitlines()) == 50 + assert len(result["content"].encode("utf-8", errors="replace")) <= 10 class TestCapLogContentNested: @@ -112,7 +110,7 @@ class TestCapLogContentNested: patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), ): result = _cap_log_content(data) - assert len(result["logFile"]["content"].splitlines()) == 50 + assert len(result["logFile"]["content"].encode("utf-8", errors="replace")) <= 10 assert result["logFile"]["path"] == "/var/log/syslog" def test_deeply_nested_content_capped(self) -> None: @@ -123,9 +121,36 @@ class TestCapLogContentNested: patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50), ): result = _cap_log_content(data) - assert len(result["outer"]["inner"]["content"].splitlines()) == 50 + assert len(result["outer"]["inner"]["content"].encode("utf-8", errors="replace")) <= 10 def test_nested_non_content_keys_unaffected(self) -> None: data = {"metrics": {"cpu": 42.5, "memory": 8192}} result = _cap_log_content(data) assert result == data + + +class TestCapLogContentSingleMassiveLine: + """A single line larger than the byte cap must be hard-capped at byte level.""" + + def test_single_massive_line_hard_caps_bytes(self) -> None: + # One line, no newlines, larger than the byte cap. + # The while-loop can't reduce it (len(lines) == 1), so the + # last-resort byte-slice path at manager.py:65-69 must fire. + huge_content = "x" * 200 + data = {"content": huge_content} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000), + ): + result = _cap_log_content(data) + assert len(result["content"].encode("utf-8", errors="replace")) <= 10 + + def test_single_massive_line_input_not_mutated(self) -> None: + huge_content = "x" * 200 + data = {"content": huge_content} + with ( + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10), + patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000), + ): + _cap_log_content(data) + assert data["content"] == huge_content diff --git a/unraid_mcp/__init__.py b/unraid_mcp/__init__.py index b6d6c59..a07b7be 100644 --- a/unraid_mcp/__init__.py +++ b/unraid_mcp/__init__.py @@ -1,13 +1,6 @@ -"""Unraid MCP Server Package. +"""Unraid MCP Server Package.""" -A modular MCP (Model Context Protocol) server that provides tools to interact -with an Unraid server's GraphQL API. -""" - -from importlib.metadata import PackageNotFoundError, version +from .version import VERSION -try: - __version__ = version("unraid-mcp") -except PackageNotFoundError: - __version__ = "0.0.0" +__version__ = VERSION diff --git a/unraid_mcp/config/logging.py b/unraid_mcp/config/logging.py index 0df21c6..f0193d8 100644 --- a/unraid_mcp/config/logging.py +++ b/unraid_mcp/config/logging.py @@ -47,7 +47,7 @@ class OverwriteFileHandler(logging.FileHandler): """Emit a record, checking file size periodically and overwriting if needed.""" self._emit_count += 1 if ( - self._emit_count % self._check_interval == 0 + (self._emit_count == 1 or self._emit_count % self._check_interval == 0) and self.stream and hasattr(self.stream, "name") ): @@ -249,5 +249,3 @@ if FASTMCP_AVAILABLE: else: # Fallback to our custom logger if FastMCP is not available logger = setup_logger() - # Also configure FastMCP logger for consistency - configure_fastmcp_logger_with_rich() diff --git a/unraid_mcp/config/settings.py b/unraid_mcp/config/settings.py index cdea8b6..1478199 100644 --- a/unraid_mcp/config/settings.py +++ b/unraid_mcp/config/settings.py @@ -5,12 +5,13 @@ and provides all configuration constants used throughout the application. """ import os -from importlib.metadata import PackageNotFoundError, version from pathlib import Path from typing import Any from dotenv import load_dotenv +from ..version import VERSION as APP_VERSION + # Get the script directory (config module location) SCRIPT_DIR = Path(__file__).parent # /home/user/code/unraid-mcp/unraid_mcp/config/ @@ -31,12 +32,6 @@ for dotenv_path in dotenv_paths: load_dotenv(dotenv_path=dotenv_path) break -# Application Version (single source of truth: pyproject.toml) -try: - VERSION = version("unraid-mcp") -except PackageNotFoundError: - VERSION = "0.0.0" - # Core API Configuration UNRAID_API_URL = os.getenv("UNRAID_API_URL") UNRAID_API_KEY = os.getenv("UNRAID_API_KEY") @@ -58,12 +53,18 @@ else: # Path to CA bundle # Logging Configuration LOG_LEVEL_STR = os.getenv("UNRAID_MCP_LOG_LEVEL", "INFO").upper() LOG_FILE_NAME = os.getenv("UNRAID_MCP_LOG_FILE", "unraid-mcp.log") -# Use /app/logs in Docker, project-relative logs/ directory otherwise -LOGS_DIR = Path("/app/logs") if Path("/app").is_dir() else PROJECT_ROOT / "logs" +# Use /.dockerenv as the container indicator for robust Docker detection. +IS_DOCKER = Path("/.dockerenv").exists() +LOGS_DIR = Path("/app/logs") if IS_DOCKER else PROJECT_ROOT / "logs" LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME -# Ensure logs directory exists -LOGS_DIR.mkdir(parents=True, exist_ok=True) +# Ensure logs directory exists; if creation fails, fall back to /tmp. +try: + LOGS_DIR.mkdir(parents=True, exist_ok=True) +except OSError: + LOGS_DIR = PROJECT_ROOT / ".cache" / "logs" + LOGS_DIR.mkdir(parents=True, exist_ok=True) + LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME # HTTP Client Configuration TIMEOUT_CONFIG = { @@ -109,3 +110,5 @@ def get_config_summary() -> dict[str, Any]: "config_valid": is_valid, "missing_config": missing if not is_valid else None, } +# Re-export application version from a single source of truth. +VERSION = APP_VERSION diff --git a/unraid_mcp/core/client.py b/unraid_mcp/core/client.py index ea568cf..eb452d6 100644 --- a/unraid_mcp/core/client.py +++ b/unraid_mcp/core/client.py @@ -7,6 +7,7 @@ to the Unraid API with proper timeout handling and error management. import asyncio import hashlib import json +import re import time from typing import Any, Final @@ -47,14 +48,14 @@ def _is_sensitive_key(key: str) -> bool: return any(s in key_lower for s in _SENSITIVE_KEYS) -def _redact_sensitive(obj: Any) -> Any: +def redact_sensitive(obj: Any) -> Any: """Recursively redact sensitive values from nested dicts/lists.""" if isinstance(obj, dict): return { - k: ("***" if _is_sensitive_key(k) else _redact_sensitive(v)) for k, v in obj.items() + k: ("***" if _is_sensitive_key(k) else redact_sensitive(v)) for k, v in obj.items() } if isinstance(obj, list): - return [_redact_sensitive(item) for item in obj] + return [redact_sensitive(item) for item in obj] return obj @@ -139,6 +140,7 @@ _CACHEABLE_QUERY_PREFIXES = frozenset( ) _CACHE_TTL_SECONDS = 60.0 +_OPERATION_NAME_PATTERN = re.compile(r"^(?:query\s+)?([_A-Za-z][_0-9A-Za-z]*)\b") class _QueryCache: @@ -160,9 +162,13 @@ class _QueryCache: @staticmethod def is_cacheable(query: str) -> bool: """Check if a query is eligible for caching based on its operation name.""" - if query.lstrip().startswith("mutation"): + normalized = query.lstrip() + if normalized.startswith("mutation"): return False - return any(prefix in query for prefix in _CACHEABLE_QUERY_PREFIXES) + match = _OPERATION_NAME_PATTERN.match(normalized) + if not match: + return False + return match.group(1) in _CACHEABLE_QUERY_PREFIXES def get(self, query: str, variables: dict[str, Any] | None) -> dict[str, Any] | None: """Return cached result if present and not expired, else None.""" @@ -324,7 +330,7 @@ async def make_graphql_request( logger.debug(f"Making GraphQL request to {UNRAID_API_URL}:") logger.debug(f"Query: {query[:200]}{'...' if len(query) > 200 else ''}") # Log truncated query if variables: - logger.debug(f"Variables: {_redact_sensitive(variables)}") + logger.debug(f"Variables: {redact_sensitive(variables)}") try: # Rate limit: consume a token before making the request diff --git a/unraid_mcp/core/exceptions.py b/unraid_mcp/core/exceptions.py index 34d84d1..7737155 100644 --- a/unraid_mcp/core/exceptions.py +++ b/unraid_mcp/core/exceptions.py @@ -45,13 +45,12 @@ def tool_error_handler( except ToolError: raise except TimeoutError as e: - logger.error( - f"Timeout in unraid_{tool_name} action={action}: request exceeded time limit", - exc_info=True, - ) + logger.exception(f"Timeout in unraid_{tool_name} action={action}: request exceeded time limit") raise ToolError( f"Request timed out executing {tool_name}/{action}. The Unraid API did not respond in time." ) from e except Exception as e: - logger.error(f"Error in unraid_{tool_name} action={action}: {e}", exc_info=True) - raise ToolError(f"Failed to execute {tool_name}/{action}: {e!s}") from e + logger.exception(f"Error in unraid_{tool_name} action={action}") + raise ToolError( + f"Failed to execute {tool_name}/{action}. Check server logs for details." + ) from e diff --git a/unraid_mcp/core/types.py b/unraid_mcp/core/types.py index 9b7ec8a..dc6ad0d 100644 --- a/unraid_mcp/core/types.py +++ b/unraid_mcp/core/types.py @@ -20,33 +20,10 @@ class SubscriptionData: last_updated: datetime # Must be timezone-aware (UTC) subscription_type: str - -@dataclass(slots=True) -class SystemHealth: - """Container for system health status information. - - Note: last_checked must be timezone-aware (use datetime.now(UTC)). - """ - - is_healthy: bool - issues: list[str] - warnings: list[str] - last_checked: datetime # Must be timezone-aware (UTC) - component_status: dict[str, str] - - -@dataclass(slots=True) -class APIResponse: - """Container for standardized API response data.""" - - success: bool - data: dict[str, Any] | None = None - error: str | None = None - metadata: dict[str, Any] | None = None - - -# Type aliases for common data structures -ConfigValue = str | int | bool | float | None -ConfigDict = dict[str, ConfigValue] -GraphQLVariables = dict[str, Any] -HealthStatus = dict[str, str | bool | int | list[Any]] + def __post_init__(self) -> None: + if self.last_updated.tzinfo is None: + raise ValueError( + "last_updated must be timezone-aware; use datetime.now(UTC)" + ) + if not self.subscription_type.strip(): + raise ValueError("subscription_type must be a non-empty string") diff --git a/unraid_mcp/core/utils.py b/unraid_mcp/core/utils.py index 5b5ec9b..fd02ed2 100644 --- a/unraid_mcp/core/utils.py +++ b/unraid_mcp/core/utils.py @@ -1,6 +1,7 @@ """Shared utility functions for Unraid MCP tools.""" from typing import Any +from urllib.parse import urlparse def safe_get(data: dict[str, Any], *keys: str, default: Any = None) -> Any: @@ -45,6 +46,25 @@ def format_bytes(bytes_value: int | None) -> str: return f"{value:.2f} EB" +def safe_display_url(url: str | None) -> str | None: + """Return a redacted URL showing only scheme + host + port. + + Strips path, query parameters, credentials, and fragments to avoid + leaking internal network topology or embedded secrets (CWE-200). + """ + if not url: + return None + try: + parsed = urlparse(url) + host = parsed.hostname or "unknown" + if parsed.port: + return f"{parsed.scheme}://{host}:{parsed.port}" + return f"{parsed.scheme}://{host}" + except ValueError: + # urlparse raises ValueError for invalid URLs (e.g. contains control chars) + return "" + + def format_kb(k: Any) -> str: """Format kilobyte values into human-readable sizes. diff --git a/unraid_mcp/subscriptions/diagnostics.py b/unraid_mcp/subscriptions/diagnostics.py index f72d010..b9dbcad 100644 --- a/unraid_mcp/subscriptions/diagnostics.py +++ b/unraid_mcp/subscriptions/diagnostics.py @@ -19,6 +19,7 @@ from websockets.typing import Subprotocol from ..config.logging import logger from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL from ..core.exceptions import ToolError +from ..core.utils import safe_display_url from .manager import subscription_manager from .resources import ensure_subscriptions_started from .utils import build_ws_ssl_context, build_ws_url @@ -162,6 +163,8 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: "note": "Connection successful, subscription may be waiting for events", } + except ToolError: + raise except Exception as e: logger.error(f"[TEST_SUBSCRIPTION] Error: {e}", exc_info=True) return {"error": str(e), "query_tested": subscription_query} @@ -193,7 +196,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: "environment": { "auto_start_enabled": subscription_manager.auto_start_enabled, "max_reconnect_attempts": subscription_manager.max_reconnect_attempts, - "unraid_api_url": UNRAID_API_URL[:50] + "..." if UNRAID_API_URL else None, + "unraid_api_url": safe_display_url(UNRAID_API_URL), "api_key_configured": bool(UNRAID_API_KEY), "websocket_url": None, }, diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index 0416e2e..c58cd5a 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -17,7 +17,7 @@ from websockets.typing import Subprotocol from ..config.logging import logger from ..config.settings import UNRAID_API_KEY -from ..core.client import _redact_sensitive +from ..core.client import redact_sensitive from ..core.types import SubscriptionData from .utils import build_ws_ssl_context, build_ws_url @@ -36,8 +36,7 @@ def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: field (from log subscriptions) exceeds the byte limit, truncate it to the most recent _MAX_RESOURCE_DATA_LINES lines. - Note: single lines larger than _MAX_RESOURCE_DATA_BYTES are not split and - will still be stored at full size; only multi-line content is truncated. + The final content is guaranteed to be <= _MAX_RESOURCE_DATA_BYTES. """ result: dict[str, Any] = {} for key, value in data.items(): @@ -49,15 +48,31 @@ def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: and len(value.encode("utf-8", errors="replace")) > _MAX_RESOURCE_DATA_BYTES ): lines = value.splitlines() + original_line_count = len(lines) + + # Keep most recent lines first. if len(lines) > _MAX_RESOURCE_DATA_LINES: - truncated = "\n".join(lines[-_MAX_RESOURCE_DATA_LINES:]) - logger.warning( - f"[RESOURCE] Capped log content from {len(lines)} to " - f"{_MAX_RESOURCE_DATA_LINES} lines ({len(value)} -> {len(truncated)} chars)" + lines = lines[-_MAX_RESOURCE_DATA_LINES:] + + # Enforce byte cap while preserving whole-line boundaries where possible. + truncated = "\n".join(lines) + truncated_bytes = truncated.encode("utf-8", errors="replace") + while len(lines) > 1 and len(truncated_bytes) > _MAX_RESOURCE_DATA_BYTES: + lines = lines[1:] + truncated = "\n".join(lines) + truncated_bytes = truncated.encode("utf-8", errors="replace") + + # Last resort: if a single line still exceeds cap, hard-cap bytes. + if len(truncated_bytes) > _MAX_RESOURCE_DATA_BYTES: + truncated = truncated_bytes[-_MAX_RESOURCE_DATA_BYTES :].decode( + "utf-8", errors="ignore" ) - result[key] = truncated - else: - result[key] = value + + logger.warning( + f"[RESOURCE] Capped log content from {original_line_count} to " + f"{len(lines)} lines ({len(value)} -> {len(truncated)} chars)" + ) + result[key] = truncated else: result[key] = value return result @@ -148,6 +163,7 @@ class SubscriptionManager: # Reset connection tracking self.reconnect_attempts[subscription_name] = 0 self.connection_states[subscription_name] = "starting" + self._connection_start_times.pop(subscription_name, None) async with self.subscription_lock: try: @@ -181,6 +197,7 @@ class SubscriptionManager: logger.debug(f"[SUBSCRIPTION:{subscription_name}] Task cancelled successfully") del self.active_subscriptions[subscription_name] self.connection_states[subscription_name] = "stopped" + self._connection_start_times.pop(subscription_name, None) logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription stopped") else: logger.warning(f"[SUBSCRIPTION:{subscription_name}] No active subscription to stop") @@ -322,7 +339,7 @@ class SubscriptionManager: ) logger.debug(f"[SUBSCRIPTION:{subscription_name}] Query: {query[:100]}...") logger.debug( - f"[SUBSCRIPTION:{subscription_name}] Variables: {_redact_sensitive(variables)}" + f"[SUBSCRIPTION:{subscription_name}] Variables: {redact_sensitive(variables)}" ) await websocket.send(json.dumps(subscription_message)) @@ -431,7 +448,8 @@ class SubscriptionManager: logger.error(f"[PROTOCOL:{subscription_name}] JSON decode error: {e}") except Exception as e: logger.error( - f"[DATA:{subscription_name}] Error processing message: {e}" + f"[DATA:{subscription_name}] Error processing message: {e}", + exc_info=True, ) msg_preview = ( message[:200] @@ -461,14 +479,22 @@ class SubscriptionManager: self.connection_states[subscription_name] = "invalid_uri" break # Don't retry on invalid URI + except ValueError as e: + # Non-retryable configuration error (e.g. UNRAID_API_URL not set) + error_msg = f"Configuration error: {e}" + logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}") + self.last_error[subscription_name] = error_msg + self.connection_states[subscription_name] = "error" + break # Don't retry on configuration errors + except Exception as e: error_msg = f"Unexpected error: {e}" - logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}") + logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}", exc_info=True) self.last_error[subscription_name] = error_msg self.connection_states[subscription_name] = "error" # Check if connection was stable before deciding on retry behavior - start_time = self._connection_start_times.get(subscription_name) + start_time = self._connection_start_times.pop(subscription_name, None) if start_time is not None: connected_duration = time.monotonic() - start_time if connected_duration >= _STABLE_CONNECTION_SECONDS: diff --git a/unraid_mcp/subscriptions/resources.py b/unraid_mcp/subscriptions/resources.py index f80a708..850ac1c 100644 --- a/unraid_mcp/subscriptions/resources.py +++ b/unraid_mcp/subscriptions/resources.py @@ -44,6 +44,7 @@ async def autostart_subscriptions() -> None: logger.info("[AUTOSTART] Auto-start process completed successfully") except Exception as e: logger.error(f"[AUTOSTART] Failed during auto-start process: {e}", exc_info=True) + raise # Propagate so ensure_subscriptions_started doesn't mark as started # Optional log file subscription log_path = os.getenv("UNRAID_AUTOSTART_LOG_PATH") diff --git a/unraid_mcp/tools/array.py b/unraid_mcp/tools/array.py index 0afe755..85fe93b 100644 --- a/unraid_mcp/tools/array.py +++ b/unraid_mcp/tools/array.py @@ -3,7 +3,7 @@ Provides the `unraid_array` tool with 5 actions for parity check management. """ -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -53,6 +53,14 @@ ARRAY_ACTIONS = Literal[ "parity_status", ] +if set(get_args(ARRAY_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(ARRAY_ACTIONS)) + _extra = set(get_args(ARRAY_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"ARRAY_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + def register_array_tool(mcp: FastMCP) -> None: """Register the unraid_array tool with the FastMCP instance.""" diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index cd31e7b..b125551 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -5,7 +5,7 @@ logs, networks, and update management. """ import re -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -135,6 +135,14 @@ DOCKER_ACTIONS = Literal[ "check_updates", ] +if set(get_args(DOCKER_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(DOCKER_ACTIONS)) + _extra = set(get_args(DOCKER_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"DOCKER_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + # Full PrefixedID: 64 hex chars + optional suffix (e.g., ":local") _DOCKER_ID_PATTERN = re.compile(r"^[a-f0-9]{64}(:[a-z0-9]+)?$", re.IGNORECASE) @@ -199,11 +207,6 @@ def get_available_container_names(containers: list[dict[str, Any]]) -> list[str] return names -def _looks_like_container_id(identifier: str) -> bool: - """Check if an identifier looks like a container ID (full or short hex prefix).""" - return bool(_DOCKER_ID_PATTERN.match(identifier) or _DOCKER_SHORT_ID_PATTERN.match(identifier)) - - async def _resolve_container_id(container_id: str, *, strict: bool = False) -> str: """Resolve a container name/identifier to its actual PrefixedID. @@ -233,12 +236,21 @@ async def _resolve_container_id(container_id: str, *, strict: bool = False) -> s # Short hex prefix: match by ID prefix before trying name matching if _DOCKER_SHORT_ID_PATTERN.match(container_id): id_lower = container_id.lower() + matches: list[dict[str, Any]] = [] for c in containers: cid = (c.get("id") or "").lower() if cid.startswith(id_lower) or cid.split(":")[0].startswith(id_lower): - actual_id = str(c.get("id", "")) - logger.info(f"Resolved short ID '{container_id}' -> '{actual_id}'") - return actual_id + matches.append(c) + if len(matches) == 1: + actual_id = str(matches[0].get("id", "")) + logger.info(f"Resolved short ID '{container_id}' -> '{actual_id}'") + return actual_id + if len(matches) > 1: + candidate_ids = [str(c.get("id", "")) for c in matches[:5]] + raise ToolError( + f"Short container ID prefix '{container_id}' is ambiguous. " + f"Matches: {', '.join(candidate_ids)}. Use a longer ID or exact name." + ) resolved = find_container_by_identifier(container_id, containers, strict=strict) if resolved: @@ -303,7 +315,7 @@ def register_docker_tool(mcp: FastMCP) -> None: if action == "network_details" and not network_id: raise ToolError("network_id is required for 'network_details' action") - if tail_lines < 1 or tail_lines > _MAX_TAIL_LINES: + if action == "logs" and (tail_lines < 1 or tail_lines > _MAX_TAIL_LINES): raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}") with tool_error_handler("docker", action, logger): @@ -335,12 +347,12 @@ def register_docker_tool(mcp: FastMCP) -> None: if action == "networks": data = await make_graphql_request(QUERIES["networks"]) - networks = data.get("dockerNetworks", []) + networks = safe_get(data, "dockerNetworks", default=[]) return {"networks": networks} if action == "network_details": data = await make_graphql_request(QUERIES["network_details"], {"id": network_id}) - return dict(data.get("dockerNetwork") or {}) + return dict(safe_get(data, "dockerNetwork", default={}) or {}) if action == "port_conflicts": data = await make_graphql_request(QUERIES["port_conflicts"]) diff --git a/unraid_mcp/tools/health.py b/unraid_mcp/tools/health.py index dc34559..e025171 100644 --- a/unraid_mcp/tools/health.py +++ b/unraid_mcp/tools/health.py @@ -6,8 +6,7 @@ connection testing, and subscription diagnostics. import datetime import time -from typing import Any, Literal -from urllib.parse import urlparse +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -21,31 +20,21 @@ from ..config.settings import ( ) from ..core.client import make_graphql_request from ..core.exceptions import ToolError, tool_error_handler - - -def _safe_display_url(url: str | None) -> str | None: - """Return a redacted URL showing only scheme + host + port. - - Strips path, query parameters, credentials, and fragments to avoid - leaking internal network topology or embedded secrets (CWE-200). - """ - if not url: - return None - try: - parsed = urlparse(url) - host = parsed.hostname or "unknown" - if parsed.port: - return f"{parsed.scheme}://{host}:{parsed.port}" - return f"{parsed.scheme}://{host}" - except ValueError: - # urlparse raises ValueError for invalid URLs (e.g. contains control chars) - return "" +from ..core.utils import safe_display_url ALL_ACTIONS = {"check", "test_connection", "diagnose"} HEALTH_ACTIONS = Literal["check", "test_connection", "diagnose"] +if set(get_args(HEALTH_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(HEALTH_ACTIONS)) + _extra = set(get_args(HEALTH_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + "HEALTH_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing in HEALTH_ACTIONS: {_missing}; extra in HEALTH_ACTIONS: {_extra}" + ) + # Severity ordering: only upgrade, never downgrade _SEVERITY = {"healthy": 0, "warning": 1, "degraded": 2, "unhealthy": 3} @@ -149,7 +138,7 @@ async def _comprehensive_check() -> dict[str, Any]: if info: health_info["unraid_system"] = { "status": "connected", - "url": _safe_display_url(UNRAID_API_URL), + "url": safe_display_url(UNRAID_API_URL), "machine_id": info.get("machineId"), "version": info.get("versions", {}).get("unraid"), "uptime": info.get("os", {}).get("uptime"), @@ -220,7 +209,7 @@ async def _comprehensive_check() -> dict[str, Any]: except Exception as e: # Intentionally broad: health checks must always return a result, # even on unexpected failures, so callers never get an unhandled exception. - logger.error(f"Health check failed: {e}") + logger.error(f"Health check failed: {e}", exc_info=True) return { "status": "unhealthy", "timestamp": datetime.datetime.now(datetime.UTC).isoformat(), @@ -293,10 +282,7 @@ async def _diagnose_subscriptions() -> dict[str, Any]: }, } - except ImportError: - return { - "error": "Subscription modules not available", - "timestamp": datetime.datetime.now(datetime.UTC).isoformat(), - } + except ImportError as e: + raise ToolError("Subscription modules not available") from e except Exception as e: raise ToolError(f"Failed to generate diagnostics: {e!s}") from e diff --git a/unraid_mcp/tools/info.py b/unraid_mcp/tools/info.py index b1287bb..75ff1d5 100644 --- a/unraid_mcp/tools/info.py +++ b/unraid_mcp/tools/info.py @@ -4,7 +4,7 @@ Provides the `unraid_info` tool with 19 read-only actions for retrieving system information, array status, network config, and server metadata. """ -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -180,9 +180,9 @@ INFO_ACTIONS = Literal[ "ups_config", ] -if set(INFO_ACTIONS.__args__) != ALL_ACTIONS: - _missing = ALL_ACTIONS - set(INFO_ACTIONS.__args__) - _extra = set(INFO_ACTIONS.__args__) - ALL_ACTIONS +if set(get_args(INFO_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(INFO_ACTIONS)) + _extra = set(get_args(INFO_ACTIONS)) - ALL_ACTIONS raise RuntimeError( f"QUERIES keys and INFO_ACTIONS are out of sync. " f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" @@ -415,7 +415,8 @@ def register_info_tool(mcp: FastMCP) -> None: if action in list_actions: response_key, output_key = list_actions[action] items = data.get(response_key) or [] - return {output_key: items} + normalized_items = list(items) if isinstance(items, list) else [] + return {output_key: normalized_items} raise ToolError(f"Unhandled action '{action}' — this is a bug") diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index be9c539..191c970 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -4,7 +4,7 @@ Provides the `unraid_keys` tool with 5 actions for listing, viewing, creating, updating, and deleting API keys. """ -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -55,6 +55,14 @@ KEY_ACTIONS = Literal[ "delete", ] +if set(get_args(KEY_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(KEY_ACTIONS)) + _extra = set(get_args(KEY_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"KEY_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + def register_keys_tool(mcp: FastMCP) -> None: """Register the unraid_keys tool with the FastMCP instance.""" diff --git a/unraid_mcp/tools/notifications.py b/unraid_mcp/tools/notifications.py index 0df7e2a..3053a13 100644 --- a/unraid_mcp/tools/notifications.py +++ b/unraid_mcp/tools/notifications.py @@ -4,7 +4,7 @@ Provides the `unraid_notifications` tool with 9 actions for viewing, creating, archiving, and deleting system notifications. """ -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -91,6 +91,14 @@ NOTIFICATION_ACTIONS = Literal[ "archive_all", ] +if set(get_args(NOTIFICATION_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(NOTIFICATION_ACTIONS)) + _extra = set(get_args(NOTIFICATION_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"NOTIFICATION_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + def register_notifications_tool(mcp: FastMCP) -> None: """Register the unraid_notifications tool with the FastMCP instance.""" diff --git a/unraid_mcp/tools/rclone.py b/unraid_mcp/tools/rclone.py index 7c091cd..a9af93e 100644 --- a/unraid_mcp/tools/rclone.py +++ b/unraid_mcp/tools/rclone.py @@ -5,7 +5,7 @@ cloud storage remotes (S3, Google Drive, Dropbox, FTP, etc.). """ import re -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -50,10 +50,18 @@ RCLONE_ACTIONS = Literal[ "delete_remote", ] +if set(get_args(RCLONE_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(RCLONE_ACTIONS)) + _extra = set(get_args(RCLONE_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"RCLONE_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + # Max config entries to prevent abuse _MAX_CONFIG_KEYS = 50 # Pattern for suspicious key names (path traversal, shell metacharacters) -_DANGEROUS_KEY_PATTERN = re.compile(r"[.]{2}|[/\\;|`$(){}]") +_DANGEROUS_KEY_PATTERN = re.compile(r"\.\.|[/\\;|`$(){}]") # Max length for individual config values _MAX_VALUE_LENGTH = 4096 diff --git a/unraid_mcp/tools/storage.py b/unraid_mcp/tools/storage.py index 125595c..a50049e 100644 --- a/unraid_mcp/tools/storage.py +++ b/unraid_mcp/tools/storage.py @@ -5,7 +5,7 @@ unassigned devices, log files, and log content retrieval. """ import os -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -69,6 +69,14 @@ STORAGE_ACTIONS = Literal[ "logs", ] +if set(get_args(STORAGE_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(STORAGE_ACTIONS)) + _extra = set(get_args(STORAGE_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"STORAGE_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + def register_storage_tool(mcp: FastMCP) -> None: """Register the unraid_storage tool with the FastMCP instance.""" @@ -96,7 +104,7 @@ def register_storage_tool(mcp: FastMCP) -> None: if action == "disk_details" and not disk_id: raise ToolError("disk_id is required for 'disk_details' action") - if tail_lines < 1 or tail_lines > _MAX_TAIL_LINES: + if action == "logs" and (tail_lines < 1 or tail_lines > _MAX_TAIL_LINES): raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}") if action == "logs": diff --git a/unraid_mcp/tools/virtualization.py b/unraid_mcp/tools/virtualization.py index baa421a..89166b5 100644 --- a/unraid_mcp/tools/virtualization.py +++ b/unraid_mcp/tools/virtualization.py @@ -4,7 +4,7 @@ Provides the `unraid_vm` tool with 9 actions for VM lifecycle management including start, stop, pause, resume, force stop, reboot, and reset. """ -from typing import Any, Literal +from typing import Any, Literal, get_args from fastmcp import FastMCP @@ -73,6 +73,14 @@ VM_ACTIONS = Literal[ ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) +if set(get_args(VM_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(VM_ACTIONS)) + _extra = set(get_args(VM_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"VM_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + def register_vm_tool(mcp: FastMCP) -> None: """Register the unraid_vm tool with the FastMCP instance.""" diff --git a/unraid_mcp/version.py b/unraid_mcp/version.py new file mode 100644 index 0000000..b97e207 --- /dev/null +++ b/unraid_mcp/version.py @@ -0,0 +1,11 @@ +"""Application version helpers.""" + +from importlib.metadata import PackageNotFoundError, version + + +__all__ = ["VERSION"] + +try: + VERSION = version("unraid-mcp") +except PackageNotFoundError: + VERSION = "0.0.0" From 2a5b19c42f78c582af6219f3fa407e3eab8a0b7b Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Thu, 19 Feb 2026 02:25:21 -0500 Subject: [PATCH 05/34] test: address final 3 CodeRabbit review threads - http_layer/test_request_construction.py: tighten JSON error match from "invalid response" to "invalid response.*not valid JSON" to prevent false positives - safety/test_destructive_guards.py: add test_docker_update_all_with_confirm to TestConfirmAllowsExecution (was missing positive coverage for update_all) - safety/test_destructive_guards.py: expand conftest import comment to explain why the direct conftest import is intentional and correct --- tests/http_layer/test_request_construction.py | 2 +- tests/safety/test_destructive_guards.py | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index 766fb80..e588c77 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -195,7 +195,7 @@ class TestHttpErrorHandling: @respx.mock async def test_invalid_json_response(self) -> None: respx.post(API_URL).mock(return_value=httpx.Response(200, text="not json")) - with pytest.raises(ToolError, match="invalid response"): + with pytest.raises(ToolError, match=r"invalid response.*not valid JSON"): await make_graphql_request("query { online }") diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py index 115849f..1512906 100644 --- a/tests/safety/test_destructive_guards.py +++ b/tests/safety/test_destructive_guards.py @@ -10,8 +10,10 @@ from unittest.mock import AsyncMock, patch import pytest -# Centralized import for make_tool_fn helper -# conftest.py sits in tests/ and is importable without __init__.py +# conftest.py is the shared test-helper module for this project. +# pytest automatically adds tests/ to sys.path, making it importable here +# without a package __init__.py. Do NOT add tests/__init__.py — it breaks +# conftest.py's fixture auto-discovery. from conftest import make_tool_fn from unraid_mcp.core.exceptions import ToolError @@ -271,6 +273,15 @@ class TestConfirmationGuards: class TestConfirmAllowsExecution: """Destructive actions with confirm=True should reach the GraphQL layer.""" + async def test_docker_update_all_with_confirm(self, _mock_docker_graphql: AsyncMock) -> None: + _mock_docker_graphql.return_value = { + "docker": {"updateAllContainers": [{"id": "c1", "names": ["app"], "state": "running", "status": "Up"}]} + } + tool_fn = make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") + result = await tool_fn(action="update_all", confirm=True) + assert result["success"] is True + assert result["action"] == "update_all" + async def test_docker_remove_with_confirm(self, _mock_docker_graphql: AsyncMock) -> None: cid = "a" * 64 + ":local" _mock_docker_graphql.side_effect = [ From 06f18f32fc579c67680327e97a148785ddcedf1c Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Fri, 13 Mar 2026 00:53:51 -0400 Subject: [PATCH 06/34] chore: update gitignore, bump to 0.2.1, apply CodeRabbit fixes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add .windsurf/, *.bak*, .1code/, .emdash.json to .gitignore - Sync standard gitignore entries per project conventions - Apply final test/tool fixes from CodeRabbit review threads - Update GraphQL schema to latest introspection snapshot - Bump version 0.2.0 → 0.2.1 Co-authored-by: Claude --- .gitignore | 7 + Dockerfile | 6 +- docker-compose.yml | 5 +- docs/research/feature-gap-analysis.md | 18 +- docs/research/unraid-api-crawl.md | 4 +- docs/research/unraid-api-source-analysis.md | 10 +- docs/unraid-schema.graphql | 4256 +++++++++++++---- pyproject.toml | 4 +- tests/http_layer/test_request_construction.py | 23 +- tests/safety/test_destructive_guards.py | 6 +- tests/test_array.py | 15 +- tests/test_keys.py | 22 +- tests/test_notifications.py | 22 +- unraid_mcp/tools/array.py | 6 +- unraid_mcp/tools/keys.py | 14 +- unraid_mcp/tools/notifications.py | 14 +- 16 files changed, 3294 insertions(+), 1138 deletions(-) diff --git a/.gitignore b/.gitignore index 766ee6d..ea0eee9 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,13 @@ logs/ # IDE/Editor .bivvy .cursor +.windsurf/ +.1code/ +.emdash.json + +# Backup files +*.bak +*.bak-* # Claude Code user settings (gitignore local settings) .claude/settings.local.json diff --git a/Dockerfile b/Dockerfile index bf7baa4..876256c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,8 +4,8 @@ FROM python:3.12-slim # Set the working directory in the container WORKDIR /app -# Install uv -COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/ +# Install uv (pinned tag to avoid mutable latest) +COPY --from=ghcr.io/astral-sh/uv:0.5.4 /uv /uvx /usr/local/bin/ # Create non-root user with home directory and give ownership of /app RUN groupadd --gid 1000 appuser && \ @@ -42,7 +42,7 @@ ENV UNRAID_MCP_LOG_LEVEL="INFO" # Health check HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ - CMD ["python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:6970/mcp')"] + CMD ["python", "-c", "import os, urllib.request; port = os.getenv('UNRAID_MCP_PORT', '6970'); urllib.request.urlopen(f'http://localhost:{port}/mcp')"] # Run unraid-mcp-server when the container launches CMD ["uv", "run", "unraid-mcp-server"] diff --git a/docker-compose.yml b/docker-compose.yml index 7639bcb..db6a9cb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,14 +10,15 @@ services: - ALL tmpfs: - /tmp:noexec,nosuid,size=64m + - /app/logs:noexec,nosuid,size=16m ports: # HostPort:ContainerPort (maps to UNRAID_MCP_PORT inside the container, default 6970) # Change the host port (left side) if 6970 is already in use on your host - "${UNRAID_MCP_PORT:-6970}:${UNRAID_MCP_PORT:-6970}" environment: # Core API Configuration (Required) - - UNRAID_API_URL=${UNRAID_API_URL} - - UNRAID_API_KEY=${UNRAID_API_KEY} + - UNRAID_API_URL=${UNRAID_API_URL:?UNRAID_API_URL is required} + - UNRAID_API_KEY=${UNRAID_API_KEY:?UNRAID_API_KEY is required} # MCP Server Settings - UNRAID_MCP_PORT=${UNRAID_MCP_PORT:-6970} diff --git a/docs/research/feature-gap-analysis.md b/docs/research/feature-gap-analysis.md index aef3907..588354f 100644 --- a/docs/research/feature-gap-analysis.md +++ b/docs/research/feature-gap-analysis.md @@ -243,13 +243,13 @@ Every mutation identified across all research documents with their parameters an | Mutation | Parameters | Returns | Current MCP Coverage | |----------|------------|---------|---------------------| | `login(username, password)` | `String!`, `String!` | `String` | **NO** | -| `createApiKey(input)` | `CreateApiKeyInput!` | `ApiKeyWithSecret!` | **NO** | +| `apiKey.create(input)` | `CreateApiKeyInput!` | `ApiKey!` | **NO** | | `addPermission(input)` | `AddPermissionInput!` | `Boolean!` | **NO** | | `addRoleForUser(input)` | `AddRoleForUserInput!` | `Boolean!` | **NO** | | `addRoleForApiKey(input)` | `AddRoleForApiKeyInput!` | `Boolean!` | **NO** | | `removeRoleFromApiKey(input)` | `RemoveRoleFromApiKeyInput!` | `Boolean!` | **NO** | -| `deleteApiKeys(input)` | API key IDs | `Boolean` | **NO** | -| `updateApiKey(input)` | API key update data | `Boolean` | **NO** | +| `apiKey.delete(input)` | API key IDs | `Boolean!` | **NO** | +| `apiKey.update(input)` | API key update data | `ApiKey!` | **NO** | | `addUser(input)` | `addUserInput!` | `User` | **NO** | | `deleteUser(input)` | `deleteUserInput!` | `User` | **NO** | @@ -417,11 +417,11 @@ GRAPHQL_PUBSUB_CHANNEL { | Input Type | Used By | Fields | |-----------|---------|--------| -| `CreateApiKeyInput` | `createApiKey` | `name!`, `description`, `roles[]`, `permissions[]`, `overwrite` | +| `CreateApiKeyInput` | `apiKey.create` | `name!`, `description`, `roles[]`, `permissions[]`, `overwrite` | | `AddPermissionInput` | `addPermission` | `resource!`, `actions![]` | | `AddRoleForUserInput` | `addRoleForUser` | User + role assignment | -| `AddRoleForApiKeyInput` | `addRoleForApiKey` | API key + role assignment | -| `RemoveRoleFromApiKeyInput` | `removeRoleFromApiKey` | API key + role removal | +| `AddRoleForApiKeyInput` | `apiKey.addRole` | API key + role assignment | +| `RemoveRoleFromApiKeyInput` | `apiKey.removeRole` | API key + role removal | | `arrayDiskInput` | `addDiskToArray`, `removeDiskFromArray` | Disk assignment data | | `ConnectSignInInput` | `connectSignIn` | Connect credentials | | `EnableDynamicRemoteAccessInput` | `enableDynamicRemoteAccess` | Remote access config | @@ -619,9 +619,9 @@ The current MCP server has 10 tools (76 actions) after consolidation. The follow |--------------|---------------|---------------| | `list_api_keys()` | `apiKeys` query | Key inventory | | `get_api_key(id)` | `apiKey(id)` query | Key details | -| `create_api_key(input)` | `createApiKey` mutation | Key provisioning | -| `delete_api_keys(input)` | `deleteApiKeys` mutation | Key cleanup | -| `update_api_key(input)` | `updateApiKey` mutation | Key modification | +| `create_api_key(input)` | `apiKey.create` mutation | Key provisioning | +| `delete_api_keys(input)` | `apiKey.delete` mutation | Key cleanup | +| `update_api_key(input)` | `apiKey.update` mutation | Key modification | #### Remote Access Management (0 tools currently, 1 query + 3 mutations) diff --git a/docs/research/unraid-api-crawl.md b/docs/research/unraid-api-crawl.md index 9117a40..e854afa 100644 --- a/docs/research/unraid-api-crawl.md +++ b/docs/research/unraid-api-crawl.md @@ -678,11 +678,9 @@ type Query { ```graphql type Mutation { - createApiKey(input: CreateApiKeyInput!): ApiKeyWithSecret! + apiKey: ApiKeyMutations! addPermission(input: AddPermissionInput!): Boolean! addRoleForUser(input: AddRoleForUserInput!): Boolean! - addRoleForApiKey(input: AddRoleForApiKeyInput!): Boolean! - removeRoleFromApiKey(input: RemoveRoleFromApiKeyInput!): Boolean! startArray: Array stopArray: Array addDiskToArray(input: arrayDiskInput): Array diff --git a/docs/research/unraid-api-source-analysis.md b/docs/research/unraid-api-source-analysis.md index f225aff..d9b7155 100644 --- a/docs/research/unraid-api-source-analysis.md +++ b/docs/research/unraid-api-source-analysis.md @@ -565,11 +565,11 @@ api/src/unraid-api/graph/resolvers/ | **RClone** | `createRCloneRemote(input)` | Create remote storage | CREATE_ANY:FLASH | | **RClone** | `deleteRCloneRemote(input)` | Delete remote storage | DELETE_ANY:FLASH | | **UPS** | `configureUps(config)` | Update UPS configuration | UPDATE_ANY:* | -| **API Keys** | `createApiKey(input)` | Create API key | CREATE_ANY:API_KEY | -| **API Keys** | `addRoleForApiKey(input)` | Add role to key | UPDATE_ANY:API_KEY | -| **API Keys** | `removeRoleFromApiKey(input)` | Remove role from key | UPDATE_ANY:API_KEY | -| **API Keys** | `deleteApiKeys(input)` | Delete API keys | DELETE_ANY:API_KEY | -| **API Keys** | `updateApiKey(input)` | Update API key | UPDATE_ANY:API_KEY | +| **API Keys** | `apiKey.create(input)` | Create API key | CREATE_ANY:API_KEY | +| **API Keys** | `apiKey.addRole(input)` | Add role to key | UPDATE_ANY:API_KEY | +| **API Keys** | `apiKey.removeRole(input)` | Remove role from key | UPDATE_ANY:API_KEY | +| **API Keys** | `apiKey.delete(input)` | Delete API keys | DELETE_ANY:API_KEY | +| **API Keys** | `apiKey.update(input)` | Update API key | UPDATE_ANY:API_KEY | --- diff --git a/docs/unraid-schema.graphql b/docs/unraid-schema.graphql index 43c1ef7..2f51f85 100644 --- a/docs/unraid-schema.graphql +++ b/docs/unraid-schema.graphql @@ -1,140 +1,645 @@ -# Unraid GraphQL API Schema (SDL) -# Extracted from live introspection of the Unraid API. -# Used for offline validation of MCP tool queries and mutations. +# ------------------------------------------------------ +# THIS FILE WAS AUTOMATICALLY GENERATED (DO NOT MODIFY) +# ------------------------------------------------------ -# ============================================================================ -# Custom Scalars -# ============================================================================ -scalar BigInt +"""Directive to document required permissions for fields""" +directive @usePermissions( + """The action required for access (must be a valid AuthAction enum value)""" + action: String + + """The resource required for access (must be a valid Resource enum value)""" + resource: String +) on FIELD_DEFINITION + +type ParityCheck { + """Date of the parity check""" + date: DateTime + + """Duration of the parity check in seconds""" + duration: Int + + """Speed of the parity check, in MB/s""" + speed: String + + """Status of the parity check""" + status: ParityCheckStatus! + + """Number of errors during the parity check""" + errors: Int + + """Progress percentage of the parity check""" + progress: Int + + """Whether corrections are being written to parity""" + correcting: Boolean + + """Whether the parity check is paused""" + paused: Boolean + + """Whether the parity check is running""" + running: Boolean +} + +""" +A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format. +""" scalar DateTime -scalar JSON -scalar PrefixedID -scalar Port -# ============================================================================ -# Enums -# ============================================================================ -enum ArrayState { - DISABLE_DISK - INVALID_EXPANSION - NEW_ARRAY - NEW_DISK_TOO_SMALL - NO_DATA_DISKS - PARITY_NOT_BIGGEST - RECON_DISK - STARTED - STOPPED - SWAP_DSBL - TOO_MANY_MISSING_DISKS +enum ParityCheckStatus { + NEVER_RUN + RUNNING + PAUSED + COMPLETED + CANCELLED + FAILED } -enum ArrayStateInputState { - START - STOP +type Capacity { + """Free capacity""" + free: String! + + """Used capacity""" + used: String! + + """Total capacity""" + total: String! } -enum ArrayDiskFsColor { - BLUE_BLINK - BLUE_ON - GREEN_BLINK - GREEN_ON - GREY_OFF - RED_OFF - RED_ON - YELLOW_BLINK - YELLOW_ON +type ArrayCapacity { + """Capacity in kilobytes""" + kilobytes: Capacity! + + """Capacity in number of disks""" + disks: Capacity! } +type ArrayDisk implements Node { + id: PrefixedID! + + """ + Array slot number. Parity1 is always 0 and Parity2 is always 29. Array slots will be 1 - 28. Cache slots are 30 - 53. Flash is 54. + """ + idx: Int! + name: String + device: String + + """(KB) Disk Size total""" + size: BigInt + status: ArrayDiskStatus + + """Is the disk a HDD or SSD.""" + rotational: Boolean + + """Disk temp - will be NaN if array is not started or DISK_NP""" + temp: Int + + """ + Count of I/O read requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numReads: BigInt + + """ + Count of I/O writes requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numWrites: BigInt + + """ + Number of unrecoverable errors reported by the device I/O drivers. Missing data due to unrecoverable array read errors is filled in on-the-fly using parity reconstruct (and we attempt to write this data back to the sector(s) which failed). Any unrecoverable write error results in disabling the disk. + """ + numErrors: BigInt + + """(KB) Total Size of the FS (Not present on Parity type drive)""" + fsSize: BigInt + + """(KB) Free Size on the FS (Not present on Parity type drive)""" + fsFree: BigInt + + """(KB) Used Size on the FS (Not present on Parity type drive)""" + fsUsed: BigInt + exportable: Boolean + + """ + Type of Disk - used to differentiate Boot / Cache / Flash / Data (DATA) / Parity + """ + type: ArrayDiskType! + + """(%) Disk space left to warn""" + warning: Int + + """(%) Disk space left for critical""" + critical: Int + + """File system type for the disk""" + fsType: String + + """User comment on disk""" + comment: String + + """File format (ex MBR: 4KiB-aligned)""" + format: String + + """ata | nvme | usb | (others)""" + transport: String + color: ArrayDiskFsColor + + """Whether the disk is currently spinning""" + isSpinning: Boolean +} + +interface Node { + id: PrefixedID! +} + +""" +The `BigInt` scalar type represents non-fractional signed whole numeric values. +""" +scalar BigInt + enum ArrayDiskStatus { - DISK_DSBL - DISK_DSBL_NEW - DISK_INVALID - DISK_NEW DISK_NP - DISK_NP_DSBL - DISK_NP_MISSING DISK_OK + DISK_NP_MISSING + DISK_INVALID DISK_WRONG + DISK_DSBL + DISK_NP_DSBL + DISK_DSBL_NEW + DISK_NEW } enum ArrayDiskType { - CACHE DATA - FLASH PARITY + BOOT + FLASH + CACHE } -enum AuthAction { - CREATE_ANY - CREATE_OWN - DELETE_ANY - DELETE_OWN - READ_ANY - READ_OWN - UPDATE_ANY - UPDATE_OWN +enum ArrayDiskFsColor { + GREEN_ON + GREEN_BLINK + BLUE_ON + BLUE_BLINK + YELLOW_ON + YELLOW_BLINK + RED_ON + RED_OFF + GREY_OFF } -enum ContainerPortType { - TCP - UDP +type UnraidArray implements Node { + id: PrefixedID! + + """Current array state""" + state: ArrayState! + + """Current array capacity""" + capacity: ArrayCapacity! + + """Returns the active boot disk""" + boot: ArrayDisk + + """ + All detected boot devices: every Boot entry for internal boot, including mirrored members when configured, or the mounted /boot Flash entry for legacy USB boot + """ + bootDevices: [ArrayDisk!]! + + """Parity disks in the current array""" + parities: [ArrayDisk!]! + + """Current parity check status""" + parityCheckStatus: ParityCheck! + + """Data disks in the current array""" + disks: [ArrayDisk!]! + + """Caches in the current array""" + caches: [ArrayDisk!]! } -enum ContainerState { - EXITED - RUNNING +enum ArrayState { + STARTED + STOPPED + NEW_ARRAY + RECON_DISK + DISABLE_DISK + SWAP_DSBL + INVALID_EXPANSION + PARITY_NOT_BIGGEST + TOO_MANY_MISSING_DISKS + NEW_DISK_TOO_SMALL + NO_DATA_DISKS } -enum ConfigErrorState { - INELIGIBLE - INVALID - NO_KEY_SERVER - UNKNOWN_ERROR - WITHDRAWN +type Share implements Node { + id: PrefixedID! + + """Display name""" + name: String + + """(KB) Free space""" + free: BigInt + + """(KB) Used Size""" + used: BigInt + + """(KB) Total size""" + size: BigInt + + """Disks that are included in this share""" + include: [String!] + + """Disks that are excluded from this share""" + exclude: [String!] + + """Is this share cached""" + cache: Boolean + + """Original name""" + nameOrig: String + + """User comment""" + comment: String + + """Allocator""" + allocator: String + + """Split level""" + splitLevel: String + + """Floor""" + floor: String + + """COW""" + cow: String + + """Color""" + color: String + + """LUKS status""" + luksStatus: String } +type DiskPartition { + """The name of the partition""" + name: String! + + """The filesystem type of the partition""" + fsType: DiskFsType! + + """The size of the partition in bytes""" + size: Float! +} + +"""The type of filesystem on the disk partition""" enum DiskFsType { + XFS BTRFS + VFAT + ZFS EXT4 NTFS - VFAT - XFS - ZFS } +type Disk implements Node { + id: PrefixedID! + + """The device path of the disk (e.g. /dev/sdb)""" + device: String! + + """The type of disk (e.g. SSD, HDD)""" + type: String! + + """The model name of the disk""" + name: String! + + """The manufacturer of the disk""" + vendor: String! + + """The total size of the disk in bytes""" + size: Float! + + """The number of bytes per sector""" + bytesPerSector: Float! + + """The total number of cylinders on the disk""" + totalCylinders: Float! + + """The total number of heads on the disk""" + totalHeads: Float! + + """The total number of sectors on the disk""" + totalSectors: Float! + + """The total number of tracks on the disk""" + totalTracks: Float! + + """The number of tracks per cylinder""" + tracksPerCylinder: Float! + + """The number of sectors per track""" + sectorsPerTrack: Float! + + """The firmware revision of the disk""" + firmwareRevision: String! + + """The serial number of the disk""" + serialNum: String! + + """ + Device identifier from emhttp devs.ini used by disk assignment commands + """ + emhttpDeviceId: String + + """The interface type of the disk""" + interfaceType: DiskInterfaceType! + + """The SMART status of the disk""" + smartStatus: DiskSmartStatus! + + """The current temperature of the disk in Celsius""" + temperature: Float + + """The partitions on the disk""" + partitions: [DiskPartition!]! + + """Whether the disk is spinning or not""" + isSpinning: Boolean! +} + +"""The type of interface the disk uses to connect to the system""" enum DiskInterfaceType { - PCIE SAS SATA - UNKNOWN USB + PCIE + UNKNOWN } +""" +The SMART (Self-Monitoring, Analysis and Reporting Technology) status of the disk +""" enum DiskSmartStatus { OK UNKNOWN } -enum NotificationImportance { - ALERT - INFO - WARNING +type KeyFile { + location: String + contents: String } -enum NotificationType { - ARCHIVE - UNREAD +type Registration implements Node { + id: PrefixedID! + type: registrationType + keyFile: KeyFile + state: RegistrationState + expiration: String + updateExpiration: String } -enum ParityCheckStatus { - CANCELLED - COMPLETED - FAILED - NEVER_RUN - PAUSED - RUNNING +enum registrationType { + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + INVALID + TRIAL } +enum RegistrationState { + TRIAL + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + EEXPIRED + EGUID + EGUID1 + ETRIAL + ENOKEYFILE + ENOKEYFILE1 + ENOKEYFILE2 + ENOFLASH + ENOFLASH1 + ENOFLASH2 + ENOFLASH3 + ENOFLASH4 + ENOFLASH5 + ENOFLASH6 + ENOFLASH7 + EBLACKLISTED + EBLACKLISTED1 + EBLACKLISTED2 + ENOCONN +} + +type Vars implements Node { + id: PrefixedID! + + """Unraid version""" + version: String + maxArraysz: Int + maxCachesz: Int + + """Machine hostname""" + name: String + timeZone: String + comment: String + security: String + workgroup: String + domain: String + domainShort: String + hideDotFiles: Boolean + localMaster: Boolean + enableFruit: String + + """Should a NTP server be used for time sync?""" + useNtp: Boolean + + """NTP Server 1""" + ntpServer1: String + + """NTP Server 2""" + ntpServer2: String + + """NTP Server 3""" + ntpServer3: String + + """NTP Server 4""" + ntpServer4: String + domainLogin: String + sysModel: String + sysArraySlots: Int + sysCacheSlots: Int + sysFlashSlots: Int + useSsl: Boolean + + """Port for the webui via HTTP""" + port: Int + + """Port for the webui via HTTPS""" + portssl: Int + localTld: String + bindMgt: Boolean + + """Should telnet be enabled?""" + useTelnet: Boolean + porttelnet: Int + useSsh: Boolean + portssh: Int + startPage: String + startArray: Boolean + spindownDelay: String + queueDepth: String + spinupGroups: Boolean + defaultFormat: String + defaultFsType: String + shutdownTimeout: Int + luksKeyfile: String + pollAttributes: String + pollAttributesDefault: String + pollAttributesStatus: String + nrRequests: Int + nrRequestsDefault: Int + nrRequestsStatus: String + mdNumStripes: Int + mdNumStripesDefault: Int + mdNumStripesStatus: String + mdSyncWindow: Int + mdSyncWindowDefault: Int + mdSyncWindowStatus: String + mdSyncThresh: Int + mdSyncThreshDefault: Int + mdSyncThreshStatus: String + mdWriteMethod: Int + mdWriteMethodDefault: String + mdWriteMethodStatus: String + shareDisk: String + shareUser: String + shareUserInclude: String + shareUserExclude: String + shareSmbEnabled: Boolean + shareNfsEnabled: Boolean + shareAfpEnabled: Boolean + shareInitialOwner: String + shareInitialGroup: String + shareCacheEnabled: Boolean + shareCacheFloor: String + shareMoverSchedule: String + shareMoverLogging: Boolean + fuseRemember: String + fuseRememberDefault: String + fuseRememberStatus: String + fuseDirectio: String + fuseDirectioDefault: String + fuseDirectioStatus: String + shareAvahiEnabled: Boolean + shareAvahiSmbName: String + shareAvahiSmbModel: String + shareAvahiAfpName: String + shareAvahiAfpModel: String + safeMode: Boolean + startMode: String + configValid: Boolean + configError: ConfigErrorState + joinStatus: String + deviceCount: Int + flashGuid: String + flashProduct: String + flashVendor: String + regCheck: String + regFile: String + regGuid: String + regTy: registrationType + regState: RegistrationState + + """Registration owner""" + regTo: String + regTm: String + regTm2: String + regGen: String + sbName: String + sbVersion: String + sbUpdated: String + sbEvents: Int + sbState: String + sbClean: Boolean + sbSynced: Int + sbSyncErrs: Int + sbSynced2: Int + sbSyncExit: String + sbNumDisks: Int + mdColor: String + mdNumDisks: Int + mdNumDisabled: Int + mdNumInvalid: Int + mdNumMissing: Int + mdNumNew: Int + mdNumErased: Int + mdResync: Int + mdResyncCorr: String + mdResyncPos: String + mdResyncDb: String + mdResyncDt: String + mdResyncAction: String + mdResyncSize: Int + mdState: String + mdVersion: String + cacheNumDevices: Int + cacheSbNumDisks: Int + fsState: String + bootEligible: Boolean + enableBootTransfer: String + reservedNames: String + + """Human friendly string of array events happening""" + fsProgress: String + + """ + Percentage from 0 - 100 while upgrading a disk or swapping parity drives + """ + fsCopyPrcnt: Int + fsNumMounted: Int + fsNumUnmountable: Int + fsUnmountableMask: String + + """Total amount of user shares""" + shareCount: Int + + """Total amount shares with SMB enabled""" + shareSmbCount: Int + + """Total amount shares with NFS enabled""" + shareNfsCount: Int + + """Total amount shares with AFP enabled""" + shareAfpCount: Int + shareMoverActive: Boolean + csrfToken: String +} + +"""Possible error states for configuration""" +enum ConfigErrorState { + UNKNOWN_ERROR + INELIGIBLE + INVALID + NO_KEY_SERVER + WITHDRAWN +} + +type Permission { + resource: Resource! + + """Actions allowed on this resource""" + actions: [AuthAction!]! +} + +"""Available resources for permissions""" enum Resource { ACTIVATION_CODE API_KEY @@ -167,64 +672,155 @@ enum Resource { WELCOME } +"""Authentication actions with possession (e.g., create:any, read:own)""" +enum AuthAction { + """Create any resource""" + CREATE_ANY + + """Create own resource""" + CREATE_OWN + + """Read any resource""" + READ_ANY + + """Read own resource""" + READ_OWN + + """Update any resource""" + UPDATE_ANY + + """Update own resource""" + UPDATE_OWN + + """Delete any resource""" + DELETE_ANY + + """Delete own resource""" + DELETE_OWN +} + +type ApiKey implements Node { + id: PrefixedID! + key: String! + name: String! + description: String + roles: [Role!]! + createdAt: String! + permissions: [Permission!]! +} + +"""Available roles for API keys and users""" enum Role { + """Full administrative access to all resources""" ADMIN + + """Internal Role for Unraid Connect""" CONNECT + + """Basic read access to user profile only""" GUEST + + """Read-only access to all resources""" VIEWER } -enum RegistrationState { - BASIC - EBLACKLISTED - EBLACKLISTED1 - EBLACKLISTED2 - EEXPIRED - EGUID - EGUID1 - ENOCONN - ENOFLASH - ENOFLASH1 - ENOFLASH2 - ENOFLASH3 - ENOFLASH4 - ENOFLASH5 - ENOFLASH6 - ENOFLASH7 - ENOKEYFILE - ENOKEYFILE1 - ENOKEYFILE2 - ETRIAL - LIFETIME - PLUS - PRO - STARTER - TRIAL - UNLEASHED +type SsoSettings implements Node { + id: PrefixedID! + + """List of configured OIDC providers""" + oidcProviders: [OidcProvider!]! } -enum registrationType { - BASIC - INVALID - LIFETIME - PLUS - PRO - STARTER - TRIAL - UNLEASHED +type UnifiedSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the settings""" + dataSchema: JSON! + + """The UI schema for the settings""" + uiSchema: JSON! + + """The current values of the settings""" + values: JSON! } -enum ServerStatus { - NEVER_CONNECTED - OFFLINE - ONLINE +interface FormSchema { + """The data schema for the form""" + dataSchema: JSON! + + """The UI schema for the form""" + uiSchema: JSON! + + """The current values of the form""" + values: JSON! } -enum Temperature { - CELSIUS - FAHRENHEIT +""" +The `JSON` scalar type represents JSON values as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf). +""" +scalar JSON @specifiedBy(url: "http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf") + +type ApiKeyFormSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the API key form""" + dataSchema: JSON! + + """The UI schema for the API key form""" + uiSchema: JSON! + + """The current values of the API key form""" + values: JSON! } +type UpdateSettingsResponse { + """Whether a restart is required for the changes to take effect""" + restartRequired: Boolean! + + """The updated settings values""" + values: JSON! + + """Warning messages about configuration issues found during validation""" + warnings: [String!] +} + +type Settings implements Node { + id: PrefixedID! + + """A view of all settings""" + unified: UnifiedSettings! + + """SSO settings""" + sso: SsoSettings! + + """The API setting values""" + api: ApiConfig! +} + +type Theme { + """The theme name""" + name: ThemeName! + + """Whether to show the header banner image""" + showBannerImage: Boolean! + + """Whether to show the banner gradient""" + showBannerGradient: Boolean! + + """Whether to show the description in the header""" + showHeaderDescription: Boolean! + + """The background color of the header""" + headerBackgroundColor: String + + """The text color of the header""" + headerPrimaryTextColor: String + + """The secondary text color of the header""" + headerSecondaryTextColor: String +} + +"""The theme name""" enum ThemeName { azure black @@ -232,232 +828,675 @@ enum ThemeName { white } -enum UpdateStatus { - REBUILD_READY - UNKNOWN - UPDATE_AVAILABLE - UP_TO_DATE +type InfoDisplayCase implements Node { + id: PrefixedID! + + """Case image URL""" + url: String! + + """Case icon identifier""" + icon: String! + + """Error message if any""" + error: String! + + """Base64 encoded case image""" + base64: String! } -enum VmState { - CRASHED - IDLE - NOSTATE - PAUSED - PMSUSPENDED +type InfoDisplay implements Node { + id: PrefixedID! + + """Case display configuration""" + case: InfoDisplayCase! + + """UI theme name""" + theme: ThemeName! + + """Temperature unit (C or F)""" + unit: Temperature! + + """Enable UI scaling""" + scale: Boolean! + + """Show tabs in UI""" + tabs: Boolean! + + """Enable UI resize""" + resize: Boolean! + + """Show WWN identifiers""" + wwn: Boolean! + + """Show totals""" + total: Boolean! + + """Show usage statistics""" + usage: Boolean! + + """Show text labels""" + text: Boolean! + + """Warning temperature threshold""" + warning: Int! + + """Critical temperature threshold""" + critical: Int! + + """Hot temperature threshold""" + hot: Int! + + """Maximum temperature threshold""" + max: Int + + """Locale setting""" + locale: String +} + +"""Temperature unit""" +enum Temperature { + CELSIUS + FAHRENHEIT +} + +type Language { + """Language code (e.g. en_US)""" + code: String! + + """Language description/name""" + name: String! + + """URL to the language pack XML""" + url: String +} + +type PartnerLink { + """Display title for the link""" + title: String! + + """The URL""" + url: String! +} + +type PartnerConfig { + name: String + url: String + + """Link to hardware specifications for this system""" + hardwareSpecsUrl: String + + """Link to the system manual/documentation""" + manualUrl: String + + """Link to manufacturer support page""" + supportUrl: String + + """Additional custom links provided by the partner""" + extraLinks: [PartnerLink!] +} + +type BrandingConfig { + header: String + headermetacolor: String + background: String + showBannerGradient: Boolean + theme: String + + """ + Banner image source. Supports local path, remote URL, or data URI/base64. + """ + bannerImage: String + + """ + Built-in case model value written to case-model.cfg when no custom override is supplied. + """ + caseModel: String + + """ + Case model image source. Supports local path, remote URL, or data URI/base64. + """ + caseModelImage: String + + """ + Partner logo source for light themes (azure/white). Supports local path, remote URL, or data URI/base64. + """ + partnerLogoLightUrl: String + + """ + Partner logo source for dark themes (black/gray). Supports local path, remote URL, or data URI/base64. + """ + partnerLogoDarkUrl: String + + """Indicates if a partner logo exists""" + hasPartnerLogo: Boolean + + """Custom title for onboarding welcome step""" + onboardingTitle: String + + """Custom subtitle for onboarding welcome step""" + onboardingSubtitle: String + + """Custom title for fresh install onboarding""" + onboardingTitleFreshInstall: String + + """Custom subtitle for fresh install onboarding""" + onboardingSubtitleFreshInstall: String + + """Custom title for upgrade onboarding""" + onboardingTitleUpgrade: String + + """Custom subtitle for upgrade onboarding""" + onboardingSubtitleUpgrade: String + + """Custom title for downgrade onboarding""" + onboardingTitleDowngrade: String + + """Custom subtitle for downgrade onboarding""" + onboardingSubtitleDowngrade: String + + """Custom title for incomplete onboarding""" + onboardingTitleIncomplete: String + + """Custom subtitle for incomplete onboarding""" + onboardingSubtitleIncomplete: String +} + +type SystemConfig { + serverName: String + model: String + comment: String +} + +type ActivationCode { + code: String + partner: PartnerConfig + branding: BrandingConfig + system: SystemConfig +} + +type OnboardingState { + registrationState: RegistrationState + + """Indicates whether the system is registered""" + isRegistered: Boolean! + + """Indicates whether the system is a fresh install""" + isFreshInstall: Boolean! + + """Indicates whether an activation code is present""" + hasActivationCode: Boolean! + + """Indicates whether activation is required based on current state""" + activationRequired: Boolean! +} + +"""Onboarding completion state and context""" +type Onboarding { + """ + The current onboarding status (INCOMPLETE, UPGRADE, DOWNGRADE, or COMPLETED) + """ + status: OnboardingStatus! + + """Whether this is a partner/OEM build with activation code""" + isPartnerBuild: Boolean! + + """Whether the onboarding flow has been completed""" + completed: Boolean! + + """The OS version when onboarding was completed""" + completedAtVersion: String + + """The activation code from the .activationcode file, if present""" + activationCode: String + + """Runtime onboarding state values used by the onboarding flow""" + onboardingState: OnboardingState! +} + +""" +The current onboarding status based on completion state and version relationship +""" +enum OnboardingStatus { + INCOMPLETE + UPGRADE + DOWNGRADE + COMPLETED +} + +type Customization { + activationCode: ActivationCode + + """Onboarding completion state and context""" + onboarding: Onboarding! + availableLanguages: [Language!] +} + +"""Result of attempting internal boot pool setup""" +type OnboardingInternalBootResult { + ok: Boolean! + code: Int + output: String! +} + +type RCloneDrive { + """Provider name""" + name: String! + + """Provider options and configuration schema""" + options: JSON! +} + +type RCloneBackupConfigForm { + id: ID! + dataSchema: JSON! + uiSchema: JSON! +} + +type RCloneBackupSettings { + configForm(formOptions: RCloneConfigFormInput): RCloneBackupConfigForm! + drives: [RCloneDrive!]! + remotes: [RCloneRemote!]! +} + +input RCloneConfigFormInput { + providerType: String + showAdvanced: Boolean = false + parameters: JSON +} + +type RCloneRemote { + name: String! + type: String! + parameters: JSON! + + """Complete remote configuration""" + config: JSON! +} + +"""Represents a tracked plugin installation operation""" +type PluginInstallOperation { + """Unique identifier of the operation""" + id: ID! + + """Plugin URL passed to the installer""" + url: String! + + """Optional plugin name for display purposes""" + name: String + + """Current status of the operation""" + status: PluginInstallStatus! + + """Timestamp when the operation was created""" + createdAt: DateTime! + + """Timestamp for the last update to this operation""" + updatedAt: DateTime + + """Timestamp when the operation finished, if applicable""" + finishedAt: DateTime + + """ + Collected output lines generated by the installer (capped at recent lines) + """ + output: [String!]! +} + +"""Status of a plugin installation operation""" +enum PluginInstallStatus { + FAILED + QUEUED RUNNING - SHUTDOWN - SHUTOFF + SUCCEEDED } -enum UPSCableType { - CUSTOM - ETHER - SIMPLE - SMART - USB +"""Emitted event representing progress for a plugin installation""" +type PluginInstallEvent { + """Identifier of the related plugin installation operation""" + operationId: ID! + + """Status reported with this event""" + status: PluginInstallStatus! + + """Output lines newly emitted since the previous event""" + output: [String!] + + """Timestamp when the event was emitted""" + timestamp: DateTime! } -enum UPSKillPower { - NO - YES -} +type ArrayMutations { + """Set array state""" + setState(input: ArrayStateInput!): UnraidArray! -enum UPSServiceState { - DISABLE - ENABLE -} + """Add new disk to array""" + addDiskToArray(input: ArrayDiskInput!): UnraidArray! -enum UPSType { - APCSMART - DUMB - MODBUS - NET - PCNET - SNMP - USB -} + """ + Remove existing disk from array. NOTE: The array must be stopped before running this otherwise it'll throw an error. + """ + removeDiskFromArray(input: ArrayDiskInput!): UnraidArray! -# ============================================================================ -# Interfaces -# ============================================================================ -interface Node { - id: PrefixedID! -} + """Mount a disk in the array""" + mountArrayDisk(id: PrefixedID!): ArrayDisk! -# ============================================================================ -# Input Types -# ============================================================================ -input AddPermissionInput { - actions: [AuthAction!]! - resource: Resource! -} + """Unmount a disk from the array""" + unmountArrayDisk(id: PrefixedID!): ArrayDisk! -input ArrayDiskInput { - id: PrefixedID! - slot: Int + """Clear statistics for a disk in the array""" + clearArrayDiskStatistics(id: PrefixedID!): Boolean! } input ArrayStateInput { + """Array state""" desiredState: ArrayStateInputState! } -input CreateApiKeyInput { - description: String - name: String! - overwrite: Boolean - permissions: [AddPermissionInput!] - roles: [Role!] +enum ArrayStateInputState { + START + STOP } -input UpdateApiKeyInput { - description: String +input ArrayDiskInput { + """Disk ID""" id: PrefixedID! - name: String - permissions: [AddPermissionInput!] + + """The slot for the disk""" + slot: Int +} + +type DockerMutations { + """Start a container""" + start(id: PrefixedID!): DockerContainer! + + """Stop a container""" + stop(id: PrefixedID!): DockerContainer! + + """Pause (Suspend) a container""" + pause(id: PrefixedID!): DockerContainer! + + """Unpause (Resume) a container""" + unpause(id: PrefixedID!): DockerContainer! + + """Remove a container""" + removeContainer(id: PrefixedID!, withImage: Boolean): Boolean! + + """Update auto-start configuration for Docker containers""" + updateAutostartConfiguration(entries: [DockerAutostartEntryInput!]!, persistUserPreferences: Boolean): Boolean! + + """Update a container to the latest image""" + updateContainer(id: PrefixedID!): DockerContainer! + + """Update multiple containers to the latest images""" + updateContainers(ids: [PrefixedID!]!): [DockerContainer!]! + + """Update all containers that have available updates""" + updateAllContainers: [DockerContainer!]! +} + +input DockerAutostartEntryInput { + """Docker container identifier""" + id: PrefixedID! + + """Whether the container should auto-start""" + autoStart: Boolean! + + """Number of seconds to wait after starting the container""" + wait: Int +} + +type VmMutations { + """Start a virtual machine""" + start(id: PrefixedID!): Boolean! + + """Stop a virtual machine""" + stop(id: PrefixedID!): Boolean! + + """Pause a virtual machine""" + pause(id: PrefixedID!): Boolean! + + """Resume a virtual machine""" + resume(id: PrefixedID!): Boolean! + + """Force stop a virtual machine""" + forceStop(id: PrefixedID!): Boolean! + + """Reboot a virtual machine""" + reboot(id: PrefixedID!): Boolean! + + """Reset a virtual machine""" + reset(id: PrefixedID!): Boolean! +} + +"""API Key related mutations""" +type ApiKeyMutations { + """Create an API key""" + create(input: CreateApiKeyInput!): ApiKey! + + """Add a role to an API key""" + addRole(input: AddRoleForApiKeyInput!): Boolean! + + """Remove a role from an API key""" + removeRole(input: RemoveRoleFromApiKeyInput!): Boolean! + + """Delete one or more API keys""" + delete(input: DeleteApiKeyInput!): Boolean! + + """Update an API key""" + update(input: UpdateApiKeyInput!): ApiKey! +} + +input CreateApiKeyInput { + name: String! + description: String roles: [Role!] + permissions: [AddPermissionInput!] + + """ + This will replace the existing key if one already exists with the same name, otherwise returns the existing key + """ + overwrite: Boolean +} + +input AddPermissionInput { + resource: Resource! + actions: [AuthAction!]! +} + +input AddRoleForApiKeyInput { + apiKeyId: PrefixedID! + role: Role! +} + +input RemoveRoleFromApiKeyInput { + apiKeyId: PrefixedID! + role: Role! } input DeleteApiKeyInput { ids: [PrefixedID!]! } -# Alias used in keys.py (deleteApiKeys at root level) -input DeleteApiKeysInput { - ids: [PrefixedID!]! +input UpdateApiKeyInput { + id: PrefixedID! + name: String + description: String + roles: [Role!] + permissions: [AddPermissionInput!] +} + +"""Customization related mutations""" +type CustomizationMutations { + """Update the UI theme (writes dynamix.cfg)""" + setTheme( + """Theme to apply""" + theme: ThemeName! + ): Theme! + + """Update the display locale (language)""" + setLocale( + """Locale code to apply (e.g. en_US)""" + locale: String! + ): String! +} + +""" +Parity check related mutations, WIP, response types and functionaliy will change +""" +type ParityCheckMutations { + """Start a parity check""" + start(correct: Boolean!): JSON! + + """Pause a parity check""" + pause: JSON! + + """Resume a parity check""" + resume: JSON! + + """Cancel a parity check""" + cancel: JSON! +} + +"""RClone related mutations""" +type RCloneMutations { + """Create a new RClone remote""" + createRCloneRemote(input: CreateRCloneRemoteInput!): RCloneRemote! + + """Delete an existing RClone remote""" + deleteRCloneRemote(input: DeleteRCloneRemoteInput!): Boolean! } input CreateRCloneRemoteInput { name: String! - parameters: JSON! type: String! + parameters: JSON! } input DeleteRCloneRemoteInput { name: String! } -input RCloneConfigFormInput { - parameters: JSON - providerType: String - showAdvanced: Boolean +"""Onboarding related mutations""" +type OnboardingMutations { + """Mark onboarding as completed""" + completeOnboarding: Onboarding! + + """Reset onboarding progress (for testing)""" + resetOnboarding: Onboarding! + + """Override onboarding state for testing (in-memory only)""" + setOnboardingOverride(input: OnboardingOverrideInput!): Onboarding! + + """Clear onboarding override state and reload from disk""" + clearOnboardingOverride: Onboarding! + + """Create and configure internal boot pool via emcmd operations""" + createInternalBootPool(input: CreateInternalBootPoolInput!): OnboardingInternalBootResult! } -input NotificationFilter { - importance: NotificationImportance - limit: Int! - offset: Int! - type: NotificationType! +"""Onboarding override input for testing""" +input OnboardingOverrideInput { + onboarding: OnboardingOverrideCompletionInput + activationCode: ActivationCodeOverrideInput + partnerInfo: PartnerInfoOverrideInput + registrationState: RegistrationState } -input NotificationData { - description: String! - importance: NotificationImportance! - link: String - subject: String! - title: String! +"""Onboarding completion override input""" +input OnboardingOverrideCompletionInput { + completed: Boolean + completedAtVersion: String } -# Alias used in notifications.py create mutation -input CreateNotificationInput { - description: String! - importance: NotificationImportance! - link: String - subject: String! - title: String! +"""Activation code override input""" +input ActivationCodeOverrideInput { + code: String + partner: PartnerConfigInput + branding: BrandingConfigInput + system: SystemConfigInput } -input UPSConfigInput { - batteryLevel: Int - customUpsCable: String - device: String - killUps: UPSKillPower - minutes: Int - overrideUpsCapacity: Int - service: UPSServiceState - timeout: Int - upsCable: UPSCableType - upsType: UPSType -} - -# ============================================================================ -# Object Types -# ============================================================================ -type Capacity { - free: String! - total: String! - used: String! -} - -type ArrayCapacity { - disks: Capacity! - kilobytes: Capacity! -} - -type ArrayDisk implements Node { - id: PrefixedID! - idx: Int! +input PartnerConfigInput { name: String - device: String - size: BigInt - status: ArrayDiskStatus - rotational: Boolean - temp: Int - numReads: BigInt - numWrites: BigInt - numErrors: BigInt - fsSize: BigInt - fsFree: BigInt - fsUsed: BigInt - exportable: Boolean - type: ArrayDiskType! - warning: Int - critical: Int - fsType: String + url: String + hardwareSpecsUrl: String + manualUrl: String + supportUrl: String + extraLinks: [PartnerLinkInput!] +} + +"""Partner link input for custom links""" +input PartnerLinkInput { + title: String! + url: String! +} + +input BrandingConfigInput { + header: String + headermetacolor: String + background: String + showBannerGradient: Boolean + theme: String + bannerImage: String + caseModel: String + caseModelImage: String + partnerLogoLightUrl: String + partnerLogoDarkUrl: String + hasPartnerLogo: Boolean + onboardingTitle: String + onboardingSubtitle: String + onboardingTitleFreshInstall: String + onboardingSubtitleFreshInstall: String + onboardingTitleUpgrade: String + onboardingSubtitleUpgrade: String + onboardingTitleDowngrade: String + onboardingSubtitleDowngrade: String + onboardingTitleIncomplete: String + onboardingSubtitleIncomplete: String +} + +input SystemConfigInput { + serverName: String + model: String comment: String - format: String - transport: String - color: ArrayDiskFsColor - isSpinning: Boolean } -type UnraidArray implements Node { - id: PrefixedID! - state: ArrayState! - capacity: ArrayCapacity! - boot: ArrayDisk - parities: [ArrayDisk!]! - disks: [ArrayDisk!]! - caches: [ArrayDisk!]! - parityCheckStatus: ParityCheck! +"""Partner info override input""" +input PartnerInfoOverrideInput { + partner: PartnerConfigInput + branding: BrandingConfigInput } -type ParityCheck { - correcting: Boolean - date: DateTime - duration: Int - errors: Int - paused: Boolean - progress: Int - running: Boolean - speed: String - status: ParityCheckStatus! +"""Input for creating an internal boot pool during onboarding""" +input CreateInternalBootPoolInput { + poolName: String! + devices: [String!]! + bootSizeMiB: Int! + updateBios: Boolean! + reboot: Boolean } -type ParityCheckMutations { - start(correct: Boolean): JSON! - pause: JSON! - resume: JSON! - cancel: JSON! +"""Unraid plugin management mutations""" +type UnraidPluginsMutations { + """Install an Unraid plugin and track installation progress""" + installPlugin(input: InstallPluginInput!): PluginInstallOperation! + + """Install an Unraid language pack and track installation progress""" + installLanguage(input: InstallPluginInput!): PluginInstallOperation! } -type ArrayMutations { - addDiskToArray(input: ArrayDiskInput!): UnraidArray! - clearArrayDiskStatistics(id: PrefixedID!): Boolean! - mountArrayDisk(id: PrefixedID!): ArrayDisk! - removeDiskFromArray(input: ArrayDiskInput!): UnraidArray! - setState(input: ArrayStateInput!): UnraidArray! - unmountArrayDisk(id: PrefixedID!): ArrayDisk! +"""Input payload for installing a plugin""" +input InstallPluginInput { + """Plugin installation URL (.plg)""" + url: String! + + """Optional human-readable plugin name used for logging""" + name: String + + """ + Force installation even when plugin is already present. Defaults to true to mirror the existing UI behaviour. + """ + forced: Boolean } type Config implements Node { @@ -466,587 +1505,541 @@ type Config implements Node { error: String } -type CoreVersions { - api: String - kernel: String - unraid: String -} - -type PackageVersions { - docker: String - git: String - nginx: String - node: String - npm: String - openssl: String - php: String - pm2: String -} - -type InfoVersions implements Node { - id: PrefixedID! - core: CoreVersions! - packages: PackageVersions - # Flattened fields used by the MCP tool queries (may exist in live API) - kernel: String - openssl: String - systemOpenssl: String - systemOpensslLib: String - node: String - v8: String - npm: String - yarn: String - pm2: String - gulp: String - grunt: String - git: String - tsc: String - mysql: String - redis: String - mongodb: String - apache: String - nginx: String - php: String - docker: String - postfix: String - postgresql: String - perl: String - python: String - gcc: String - unraid: String -} - -type InfoOs implements Node { - id: PrefixedID! - platform: String - distro: String - release: String - codename: String - kernel: String - arch: String - hostname: String - logofile: String - serial: String - build: String - uptime: String - fqdn: String - servicepack: String - uefi: Boolean - codepage: String -} - -type InfoCpu implements Node { - id: PrefixedID! - manufacturer: String - brand: String - vendor: String - family: String - model: String - stepping: Int - revision: String - voltage: String - speed: Float - speedmin: Float - speedmax: Float - threads: Int - cores: Int - processors: Int - socket: String - cache: JSON - flags: [String!] - packages: CpuPackages! - topology: [[[Int!]!]!]! -} - -type CpuLoad { - percentGuest: Float! - percentIdle: Float! - percentIrq: Float! - percentNice: Float! - percentSteal: Float! - percentSystem: Float! - percentTotal: Float! - percentUser: Float! -} - -type CpuPackages implements Node { - id: PrefixedID! - power: [Float!]! - temp: [Float!]! - totalPower: Float! -} - -type CpuUtilization implements Node { - id: PrefixedID! - cpus: [CpuLoad!]! - percentTotal: Float! -} - -type MemoryLayout implements Node { - id: PrefixedID! - bank: String - type: String - clockSpeed: Int - formFactor: String - manufacturer: String - partNum: String - serialNum: String - size: BigInt! - voltageConfigured: Int - voltageMax: Int - voltageMin: Int -} - -type InfoMemory implements Node { - id: PrefixedID! - layout: [MemoryLayout!]! -} - -type MemoryUtilization implements Node { - id: PrefixedID! - active: BigInt! - available: BigInt! - buffcache: BigInt! - free: BigInt! - percentSwapTotal: Float! - percentTotal: Float! - swapFree: BigInt! - swapTotal: BigInt! - swapUsed: BigInt! - total: BigInt! - used: BigInt! -} - -type InfoBaseboard implements Node { - id: PrefixedID! - manufacturer: String - model: String - version: String - serial: String - assetTag: String - memMax: Float - memSlots: Float -} - -type InfoSystem implements Node { - id: PrefixedID! - manufacturer: String - model: String - version: String - serial: String - uuid: String - sku: String - virtual: Boolean -} - type InfoGpu implements Node { id: PrefixedID! - blacklisted: Boolean! - class: String! - productid: String! + + """GPU type/manufacturer""" type: String! + + """GPU type identifier""" typeid: String! + + """Whether GPU is blacklisted""" + blacklisted: Boolean! + + """Device class""" + class: String! + + """Product ID""" + productid: String! + + """Vendor name""" vendorname: String } type InfoNetwork implements Node { id: PrefixedID! + + """Network interface name""" iface: String! - mac: String + + """Network interface model""" model: String - speed: String + + """Network vendor""" vendor: String + + """MAC address""" + mac: String + + """Virtual interface flag""" virtual: Boolean + + """Network speed""" + speed: String + + """DHCP enabled flag""" dhcp: Boolean } type InfoPci implements Node { id: PrefixedID! - blacklisted: String! - class: String! - productid: String! - productname: String + + """Device type/manufacturer""" type: String! + + """Type identifier""" typeid: String! - vendorid: String! + + """Vendor name""" vendorname: String + + """Vendor ID""" + vendorid: String! + + """Product name""" + productname: String + + """Product ID""" + productid: String! + + """Blacklisted status""" + blacklisted: String! + + """Device class""" + class: String! } type InfoUsb implements Node { id: PrefixedID! - bus: String - device: String + + """USB device name""" name: String! + + """USB bus number""" + bus: String + + """USB device number""" + device: String } type InfoDevices implements Node { id: PrefixedID! + + """List of GPU devices""" gpu: [InfoGpu!] + + """List of network interfaces""" network: [InfoNetwork!] + + """List of PCI devices""" pci: [InfoPci!] + + """List of USB devices""" usb: [InfoUsb!] } -type InfoDisplayCase implements Node { - id: PrefixedID! - base64: String! - error: String! - icon: String! - url: String! +"""CPU load for a single core""" +type CpuLoad { + """The total CPU load on a single core, in percent.""" + percentTotal: Float! + + """The percentage of time the CPU spent in user space.""" + percentUser: Float! + + """The percentage of time the CPU spent in kernel space.""" + percentSystem: Float! + + """ + The percentage of time the CPU spent on low-priority (niced) user space processes. + """ + percentNice: Float! + + """The percentage of time the CPU was idle.""" + percentIdle: Float! + + """The percentage of time the CPU spent servicing hardware interrupts.""" + percentIrq: Float! + + """The percentage of time the CPU spent running virtual machines (guest).""" + percentGuest: Float! + + """The percentage of CPU time stolen by the hypervisor.""" + percentSteal: Float! } -type InfoDisplay implements Node { +type CpuPackages implements Node { id: PrefixedID! - case: InfoDisplayCase! - critical: Int! - hot: Int! - locale: String - max: Int - resize: Boolean! - scale: Boolean! - tabs: Boolean! - text: Boolean! - theme: ThemeName! - total: Boolean! - unit: Temperature! - usage: Boolean! - warning: Int! - wwn: Boolean! + + """Total CPU package power draw (W)""" + totalPower: Float! + + """Power draw per package (W)""" + power: [Float!]! + + """Temperature per package (°C)""" + temp: [Float!]! } -type Apps { - installed: Int - started: Int +type CpuUtilization implements Node { + id: PrefixedID! + + """Total CPU load in percent""" + percentTotal: Float! + + """CPU load for each core""" + cpus: [CpuLoad!]! +} + +type InfoCpu implements Node { + id: PrefixedID! + + """CPU manufacturer""" + manufacturer: String + + """CPU brand name""" + brand: String + + """CPU vendor""" + vendor: String + + """CPU family""" + family: String + + """CPU model""" + model: String + + """CPU stepping""" + stepping: Int + + """CPU revision""" + revision: String + + """CPU voltage""" + voltage: String + + """Current CPU speed in GHz""" + speed: Float + + """Minimum CPU speed in GHz""" + speedmin: Float + + """Maximum CPU speed in GHz""" + speedmax: Float + + """Number of CPU threads""" + threads: Int + + """Number of CPU cores""" + cores: Int + + """Number of physical processors""" + processors: Int + + """CPU socket type""" + socket: String + + """CPU cache information""" + cache: JSON + + """CPU feature flags""" + flags: [String!] + + """ + Per-package array of core/thread pairs, e.g. [[[0,1],[2,3]], [[4,5],[6,7]]] + """ + topology: [[[Int!]!]!]! + packages: CpuPackages! +} + +type MemoryLayout implements Node { + id: PrefixedID! + + """Memory module size in bytes""" + size: BigInt! + + """Memory bank location (e.g., BANK 0)""" + bank: String + + """Memory type (e.g., DDR4, DDR5)""" + type: String + + """Memory clock speed in MHz""" + clockSpeed: Int + + """Part number of the memory module""" + partNum: String + + """Serial number of the memory module""" + serialNum: String + + """Memory manufacturer""" + manufacturer: String + + """Form factor (e.g., DIMM, SODIMM)""" + formFactor: String + + """Configured voltage in millivolts""" + voltageConfigured: Int + + """Minimum voltage in millivolts""" + voltageMin: Int + + """Maximum voltage in millivolts""" + voltageMax: Int +} + +type MemoryUtilization implements Node { + id: PrefixedID! + + """Total system memory in bytes""" + total: BigInt! + + """Used memory in bytes""" + used: BigInt! + + """Free memory in bytes""" + free: BigInt! + + """Available memory in bytes""" + available: BigInt! + + """Active memory in bytes""" + active: BigInt! + + """Buffer/cache memory in bytes""" + buffcache: BigInt! + + """Memory usage percentage""" + percentTotal: Float! + + """Total swap memory in bytes""" + swapTotal: BigInt! + + """Used swap memory in bytes""" + swapUsed: BigInt! + + """Free swap memory in bytes""" + swapFree: BigInt! + + """Swap usage percentage""" + percentSwapTotal: Float! +} + +type InfoMemory implements Node { + id: PrefixedID! + + """Physical memory layout""" + layout: [MemoryLayout!]! +} + +type InfoNetworkInterface implements Node { + id: PrefixedID! + + """Interface name (e.g. eth0)""" + name: String! + + """Interface description/label""" + description: String + + """MAC Address""" + macAddress: String + + """Connection status""" + status: String + + """IPv4 Protocol mode""" + protocol: String + + """IPv4 Address""" + ipAddress: String + + """IPv4 Netmask""" + netmask: String + + """IPv4 Gateway""" + gateway: String + + """Using DHCP for IPv4""" + useDhcp: Boolean + + """IPv6 Address""" + ipv6Address: String + + """IPv6 Netmask""" + ipv6Netmask: String + + """IPv6 Gateway""" + ipv6Gateway: String + + """Using DHCP for IPv6""" + useDhcp6: Boolean +} + +type InfoOs implements Node { + id: PrefixedID! + + """Operating system platform""" + platform: String + + """Linux distribution name""" + distro: String + + """OS release version""" + release: String + + """OS codename""" + codename: String + + """Kernel version""" + kernel: String + + """OS architecture""" + arch: String + + """Hostname""" + hostname: String + + """Fully qualified domain name""" + fqdn: String + + """OS build identifier""" + build: String + + """Service pack version""" + servicepack: String + + """Boot time ISO string""" + uptime: String + + """OS logo name""" + logofile: String + + """OS serial number""" + serial: String + + """OS started via UEFI""" + uefi: Boolean +} + +type InfoSystem implements Node { + id: PrefixedID! + + """System manufacturer""" + manufacturer: String + + """System model""" + model: String + + """System version""" + version: String + + """System serial number""" + serial: String + + """System UUID""" + uuid: String + + """System SKU""" + sku: String + + """Virtual machine flag""" + virtual: Boolean +} + +type InfoBaseboard implements Node { + id: PrefixedID! + + """Motherboard manufacturer""" + manufacturer: String + + """Motherboard model""" + model: String + + """Motherboard version""" + version: String + + """Motherboard serial number""" + serial: String + + """Motherboard asset tag""" + assetTag: String + + """Maximum memory capacity in bytes""" + memMax: Float + + """Number of memory slots""" + memSlots: Float +} + +type CoreVersions { + """Unraid version""" + unraid: String + + """Unraid API version""" + api: String + + """Kernel version""" + kernel: String +} + +type PackageVersions { + """OpenSSL version""" + openssl: String + + """Node.js version""" + node: String + + """npm version""" + npm: String + + """pm2 version""" + pm2: String + + """Git version""" + git: String + + """nginx version""" + nginx: String + + """PHP version""" + php: String + + """Docker version""" + docker: String +} + +type InfoVersions implements Node { + id: PrefixedID! + + """Core system versions""" + core: CoreVersions! + + """Software package versions""" + packages: PackageVersions } type Info implements Node { id: PrefixedID! - os: InfoOs! - cpu: InfoCpu! - memory: InfoMemory! - baseboard: InfoBaseboard! - system: InfoSystem! - versions: InfoVersions! - devices: InfoDevices! - display: InfoDisplay! - apps: Apps - machineId: ID + + """Current server time""" time: DateTime! + + """Motherboard information""" + baseboard: InfoBaseboard! + + """CPU information""" + cpu: InfoCpu! + + """Device information""" + devices: InfoDevices! + + """Display configuration""" + display: InfoDisplay! + + """Machine ID""" + machineId: ID + + """Memory information""" + memory: InfoMemory! + + """Operating system information""" + os: InfoOs! + + """System information""" + system: InfoSystem! + + """Software versions""" + versions: InfoVersions! + + """Network interfaces""" + networkInterfaces: [InfoNetworkInterface!]! + + """Primary management interface""" + primaryNetwork: InfoNetworkInterface } -type MetricsCpu { - used: Float -} - -type MetricsMemory { - used: Float - total: Float -} - -type Metrics implements Node { - id: PrefixedID! - cpu: MetricsCpu - memory: MetricsMemory -} - -type Service implements Node { - id: PrefixedID! - name: String - state: String - online: Boolean - uptime: Uptime - version: String -} - -type Uptime { - timestamp: String -} - -type AccessUrl { - type: String - name: String - ipv4: String - ipv6: String -} - -type Network implements Node { - id: PrefixedID! - accessUrls: [AccessUrl!] -} - -type KeyFile { - contents: String - location: String -} - -type Registration implements Node { - id: PrefixedID! - type: registrationType - keyFile: KeyFile - state: RegistrationState - expiration: String - updateExpiration: String -} - -type ConnectSettings { - status: String - sandbox: Boolean - flashGuid: String -} - -type Owner { - username: String! - avatar: String! - url: String! -} - -type ProfileModel implements Node { - id: PrefixedID! - avatar: String! - url: String! - username: String! -} - -type Server implements Node { - id: PrefixedID! +type ExplicitStatusItem { name: String! - status: ServerStatus! - description: String - ip: String - port: Int - guid: String! - apikey: String! - lanip: String! - localurl: String! - remoteurl: String! - owner: ProfileModel! - wanip: String! + updateStatus: UpdateStatus! } -type Flash implements Node { - id: PrefixedID! - guid: String! - product: String! - vendor: String! - size: BigInt -} - -type Vars implements Node { - id: PrefixedID! - version: String - name: String - timeZone: String - comment: String - security: String - workgroup: String - domain: String - domainShort: String - hideDotFiles: Boolean - localMaster: Boolean - enableFruit: String - useNtp: Boolean - domainLogin: String - sysModel: String - sysFlashSlots: Int - useSsl: Boolean - port: Int - portssl: Int - localTld: String - bindMgt: Boolean - useTelnet: Boolean - porttelnet: Int - useSsh: Boolean - portssh: Int - startPage: String - startArray: Boolean - shutdownTimeout: Int - shareSmbEnabled: Boolean - shareNfsEnabled: Boolean - shareAfpEnabled: Boolean - shareCacheEnabled: Boolean - shareAvahiEnabled: Boolean - safeMode: Boolean - startMode: String - configValid: Boolean - configError: ConfigErrorState - joinStatus: String - deviceCount: Int - flashGuid: String - flashProduct: String - flashVendor: String - mdState: String - mdVersion: String - shareCount: Int - shareSmbCount: Int - shareNfsCount: Int - shareAfpCount: Int - shareMoverActive: Boolean - csrfToken: String -} - -type ApiConfig { - extraOrigins: [String!]! - plugins: [String!]! - sandbox: Boolean - ssoSubIds: [String!]! - version: String! -} - -type SsoSettings implements Node { - id: PrefixedID! - oidcProviders: [OidcProvider!]! -} - -type OidcProvider { - id: PrefixedID! - name: String! - clientId: String! - clientSecret: String - issuer: String - authorizationEndpoint: String - tokenEndpoint: String - jwksUri: String - scopes: [String!]! - buttonText: String - buttonIcon: String - buttonStyle: String - buttonVariant: String - authorizationRuleMode: String - authorizationRules: [JSON!] -} - -type UnifiedSettings implements Node { - id: PrefixedID! - dataSchema: JSON! - uiSchema: JSON! - values: JSON! -} - -type Settings implements Node { - id: PrefixedID! - api: ApiConfig! - sso: SsoSettings! - unified: UnifiedSettings! -} - -type UPSBattery { - chargeLevel: Int! - estimatedRuntime: Int! - health: String! -} - -type UPSPower { - inputVoltage: Float! - loadPercentage: Int! - outputVoltage: Float! -} - -type UPSDevice { - id: ID! - model: String! - name: String! - status: String! - battery: UPSBattery! - power: UPSPower! - # Flattened fields used by MCP tool queries - runtime: Int - charge: Int - load: Int - voltage: Float - frequency: Float - temperature: Float -} - -type UPSConfiguration { - enabled: Boolean - mode: String - cable: String - driver: String - port: String - batteryLevel: Int - customUpsCable: String - device: String - killUps: String - minutes: Int - modelName: String - netServer: String - nisIp: String - overrideUpsCapacity: Int - service: String - timeout: Int - upsCable: String - upsName: String - upsType: String -} - -type Share implements Node { - id: PrefixedID! - name: String - free: BigInt - used: BigInt - size: BigInt - include: [String!] - exclude: [String!] - cache: Boolean - nameOrig: String - comment: String - allocator: String - splitLevel: String - floor: String - cow: String - color: String - luksStatus: String -} - -type Disk implements Node { - id: PrefixedID! - device: String! - name: String! - serialNum: String! - size: Float! - temperature: Float - bytesPerSector: Float! - firmwareRevision: String! - interfaceType: DiskInterfaceType! - isSpinning: Boolean! - partitions: [DiskPartition!]! - sectorsPerTrack: Float! - smartStatus: DiskSmartStatus! - totalCylinders: Float! - totalHeads: Float! - totalSectors: Float! - totalTracks: Float! - tracksPerCylinder: Float! - type: String! - vendor: String! -} - -type DiskPartition { - fsType: DiskFsType! - name: String! - size: Float! -} - -type UnassignedDevice { - id: PrefixedID! - device: String - name: String - size: BigInt - type: String -} - -type LogFile { - name: String! - path: String! - size: Int! - modifiedAt: DateTime! -} - -type LogFileContent { - path: String! - content: String! - totalLines: Int! - startLine: Int +"""Update status of a container.""" +enum UpdateStatus { + UP_TO_DATE + UPDATE_AVAILABLE + REBUILD_READY + UNKNOWN } type ContainerPort { @@ -1056,6 +2049,39 @@ type ContainerPort { type: ContainerPortType! } +""" +A field whose value is a valid TCP port within the range of 0 to 65535: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_ports +""" +scalar Port + +enum ContainerPortType { + TCP + UDP +} + +type DockerPortConflictContainer { + id: PrefixedID! + name: String! +} + +type DockerContainerPortConflict { + privatePort: Port! + type: ContainerPortType! + containers: [DockerPortConflictContainer!]! +} + +type DockerLanPortConflict { + lanIpPort: String! + publicPort: Port + type: ContainerPortType! + containers: [DockerPortConflictContainer!]! +} + +type DockerPortConflicts { + containerPorts: [DockerContainerPortConflict!]! + lanPorts: [DockerLanPortConflict!]! +} + type ContainerHostConfig { networkMode: String! } @@ -1068,7 +2094,18 @@ type DockerContainer implements Node { command: String! created: Int! ports: [ContainerPort!]! + + """List of LAN-accessible host:port values""" + lanIpPorts: [String!] + + """Total size of all files in the container (in bytes)""" sizeRootFs: BigInt + + """Size of writable layer (in bytes)""" + sizeRw: BigInt + + """Size of container logs (in bytes)""" + sizeLog: BigInt labels: JSON state: ContainerState! status: String! @@ -1076,110 +2113,217 @@ type DockerContainer implements Node { networkSettings: JSON mounts: [JSON!] autoStart: Boolean! + + """Zero-based order in the auto-start list""" + autoStartOrder: Int + + """Wait time in seconds applied after start""" + autoStartWait: Int + templatePath: String + + """Project/Product homepage URL""" + projectUrl: String + + """Registry/Docker Hub URL""" + registryUrl: String + + """Support page/thread URL""" + supportUrl: String + + """Icon URL""" + iconUrl: String + + """Resolved WebUI URL from template""" + webUiUrl: String + + """Shell to use for console access (from template)""" + shell: String + + """Port mappings from template (used when container is not running)""" + templatePorts: [ContainerPort!] + + """Whether the container is orphaned (no template found)""" + isOrphaned: Boolean! + isUpdateAvailable: Boolean + isRebuildReady: Boolean + + """Whether Tailscale is enabled for this container""" + tailscaleEnabled: Boolean! + + """Tailscale status for this container (fetched via docker exec)""" + tailscaleStatus(forceRefresh: Boolean = false): TailscaleStatus } -type PortConflict { - containerName: String - port: Int - conflictsWith: String -} - -type ExplicitStatusItem { - name: String! - updateStatus: UpdateStatus! -} - -type ContainerUpdateStatus { - id: PrefixedID! - name: String - updateAvailable: Boolean - currentVersion: String - latestVersion: String -} - -type DockerMutations { - start(id: PrefixedID!): DockerContainer! - stop(id: PrefixedID!): DockerContainer! - pause(id: PrefixedID!): DockerContainer! - unpause(id: PrefixedID!): DockerContainer! - removeContainer(id: PrefixedID!): Boolean! - updateContainer(id: PrefixedID!): DockerContainer! - updateAllContainers: [DockerContainer!]! - logs(id: PrefixedID!, tail: Int): String +enum ContainerState { + RUNNING + PAUSED + EXITED } type DockerNetwork implements Node { id: PrefixedID! name: String! - driver: String! + created: String! scope: String! - containers: JSON! + driver: String! + enableIPv6: Boolean! + ipam: JSON! + internal: Boolean! attachable: Boolean! + ingress: Boolean! configFrom: JSON! configOnly: Boolean! - created: String! - enableIPv6: Boolean! - ingress: Boolean! - internal: Boolean! - ipam: JSON! - labels: JSON! + containers: JSON! options: JSON! + labels: JSON! +} + +type DockerContainerLogLine { + timestamp: DateTime! + message: String! +} + +type DockerContainerLogs { + containerId: PrefixedID! + lines: [DockerContainerLogLine!]! + + """ + Cursor that can be passed back through the since argument to continue streaming logs. + """ + cursor: DateTime +} + +type DockerContainerStats { + id: PrefixedID! + + """CPU Usage Percentage""" + cpuPercent: Float! + + """Memory Usage String (e.g. 100MB / 1GB)""" + memUsage: String! + + """Memory Usage Percentage""" + memPercent: Float! + + """Network I/O String (e.g. 100MB / 1GB)""" + netIO: String! + + """Block I/O String (e.g. 100MB / 1GB)""" + blockIO: String! +} + +"""Tailscale exit node connection status""" +type TailscaleExitNodeStatus { + """Whether the exit node is online""" + online: Boolean! + + """Tailscale IPs of the exit node""" + tailscaleIps: [String!] +} + +"""Tailscale status for a Docker container""" +type TailscaleStatus { + """Whether Tailscale is online in the container""" + online: Boolean! + + """Current Tailscale version""" + version: String + + """Latest available Tailscale version""" + latestVersion: String + + """Whether a Tailscale update is available""" + updateAvailable: Boolean! + + """Configured Tailscale hostname""" + hostname: String + + """Actual Tailscale DNS name""" + dnsName: String + + """DERP relay code""" + relay: String + + """DERP relay region name""" + relayName: String + + """Tailscale IPv4 and IPv6 addresses""" + tailscaleIps: [String!] + + """Advertised subnet routes""" + primaryRoutes: [String!] + + """Whether this container is an exit node""" + isExitNode: Boolean! + + """Status of the connected exit node (if using one)""" + exitNodeStatus: TailscaleExitNodeStatus + + """Tailscale Serve/Funnel WebUI URL""" + webUiUrl: String + + """Tailscale key expiry date""" + keyExpiry: DateTime + + """Days until key expires""" + keyExpiryDays: Int + + """Whether the Tailscale key has expired""" + keyExpired: Boolean! + + """Tailscale backend state (Running, NeedsLogin, Stopped, etc.)""" + backendState: String + + """Authentication URL if Tailscale needs login""" + authUrl: String } type Docker implements Node { id: PrefixedID! - containers(skipCache: Boolean! = false): [DockerContainer!]! - networks(skipCache: Boolean! = false): [DockerNetwork!]! - portConflicts: [PortConflict!] - containerUpdateStatuses: [ContainerUpdateStatus!] - logs(id: PrefixedID!, tail: Int): String + containers(skipCache: Boolean! = false @deprecated(reason: "Caching has been removed; this parameter is now ignored")): [DockerContainer!]! + networks(skipCache: Boolean! = false @deprecated(reason: "Caching has been removed; this parameter is now ignored")): [DockerNetwork!]! + portConflicts(skipCache: Boolean! = false @deprecated(reason: "Caching has been removed; this parameter is now ignored")): DockerPortConflicts! + + """ + Access container logs. Requires specifying a target container id through resolver arguments. + """ + logs(id: PrefixedID!, since: DateTime, tail: Int): DockerContainerLogs! + container(id: PrefixedID!): DockerContainer + organizer(skipCache: Boolean! = false @deprecated(reason: "Caching has been removed; this parameter is now ignored")): ResolvedOrganizerV1! + containerUpdateStatuses: [ExplicitStatusItem!]! } -type VmDomain implements Node { - id: PrefixedID! - name: String - state: VmState! - uuid: String +type DockerTemplateSyncResult { + scanned: Int! + matched: Int! + skipped: Int! + errors: [String!]! } -type VmMutations { - start(id: PrefixedID!): Boolean! - stop(id: PrefixedID!): Boolean! - pause(id: PrefixedID!): Boolean! - resume(id: PrefixedID!): Boolean! - forceStop(id: PrefixedID!): Boolean! - reboot(id: PrefixedID!): Boolean! - reset(id: PrefixedID!): Boolean! -} - -type Vms implements Node { - id: PrefixedID! - domain: [VmDomain!] - domains: [VmDomain!] -} - -type Permission { - actions: [AuthAction!]! - resource: Resource! -} - -type ApiKey implements Node { - id: PrefixedID! +type ResolvedOrganizerView { + id: String! name: String! - key: String! - roles: JSON - permissions: JSON - createdAt: String! - description: String - lastUsed: String + rootId: String! + flatEntries: [FlatOrganizerEntry!]! + prefs: JSON } -type ApiKeyMutations { - create(input: CreateApiKeyInput!): ApiKey! - update(input: UpdateApiKeyInput!): ApiKey! - delete(input: DeleteApiKeyInput!): Boolean! - addRole(input: JSON!): Boolean! - removeRole(input: JSON!): Boolean! +type ResolvedOrganizerV1 { + version: Float! + views: [ResolvedOrganizerView!]! +} + +type FlatOrganizerEntry { + id: String! + type: String! + name: String! + parentId: String + depth: Float! + position: Float! + path: [String!]! + hasChildren: Boolean! + childrenIds: [String!]! + meta: DockerContainer } type NotificationCounts { @@ -1196,233 +2340,1231 @@ type NotificationOverview { type Notification implements Node { id: PrefixedID! + + """Also known as 'event'""" title: String! subject: String! description: String! importance: NotificationImportance! link: String type: NotificationType! + + """ISO Timestamp for when the notification occurred""" timestamp: String formattedTimestamp: String } +enum NotificationImportance { + ALERT + INFO + WARNING +} + +enum NotificationType { + UNREAD + ARCHIVE +} + type Notifications implements Node { id: PrefixedID! + + """A cached overview of the notifications in the system & their severity.""" overview: NotificationOverview! list(filter: NotificationFilter!): [Notification!]! - warningsAndAlerts: [Notification!] - # Mutation-like fields used by MCP notification mutations - createNotification(input: CreateNotificationInput!): Notification - archiveNotification(id: PrefixedID!): Boolean - unreadNotification(id: PrefixedID!): Boolean - deleteNotification(id: PrefixedID!, type: NotificationType!): Boolean - deleteArchivedNotifications: Boolean - archiveAll(importance: NotificationImportance): Boolean + + """ + Deduplicated list of unread warning and alert notifications, sorted latest first. + """ + warningsAndAlerts: [Notification!]! +} + +input NotificationFilter { + importance: NotificationImportance + type: NotificationType! + offset: Int! + limit: Int! +} + +type FlashBackupStatus { + """Status message indicating the outcome of the backup initiation.""" + status: String! + + """Job ID if available, can be used to check job status.""" + jobId: String +} + +type Flash implements Node { + id: PrefixedID! + guid: String! + vendor: String! + product: String! +} + +type LogFile { + """Name of the log file""" + name: String! + + """Full path to the log file""" + path: String! + + """Size of the log file in bytes""" + size: Int! + + """Last modified timestamp""" + modifiedAt: DateTime! +} + +type LogFileContent { + """Path to the log file""" + path: String! + + """Content of the log file""" + content: String! + + """Total number of lines in the file""" + totalLines: Int! + + """Starting line number of the content (1-indexed)""" + startLine: Int +} + +type TemperatureReading { + """Temperature value""" + value: Float! + + """Temperature unit""" + unit: TemperatureUnit! + + """Timestamp of reading""" + timestamp: DateTime! + + """Temperature status""" + status: TemperatureStatus! +} + +enum TemperatureUnit { + CELSIUS + FAHRENHEIT + KELVIN + RANKINE +} + +enum TemperatureStatus { + NORMAL + WARNING + CRITICAL + UNKNOWN +} + +type TemperatureSensor implements Node { + id: PrefixedID! + + """Sensor name""" + name: String! + + """Type of sensor""" + type: SensorType! + + """Physical location""" + location: String + + """Current temperature""" + current: TemperatureReading! + + """Minimum recorded""" + min: TemperatureReading + + """Maximum recorded""" + max: TemperatureReading + + """Warning threshold""" + warning: Float + + """Critical threshold""" + critical: Float + + """Historical readings for this sensor""" + history: [TemperatureReading!] +} + +"""Type of temperature sensor""" +enum SensorType { + CPU_PACKAGE + CPU_CORE + MOTHERBOARD + CHIPSET + GPU + DISK + NVME + AMBIENT + VRM + CUSTOM +} + +type TemperatureSummary { + """Average temperature across all sensors""" + average: Float! + + """Hottest sensor""" + hottest: TemperatureSensor! + + """Coolest sensor""" + coolest: TemperatureSensor! + + """Count of sensors at warning level""" + warningCount: Int! + + """Count of sensors at critical level""" + criticalCount: Int! +} + +type TemperatureMetrics implements Node { + id: PrefixedID! + + """All temperature sensors""" + sensors: [TemperatureSensor!]! + + """Temperature summary""" + summary: TemperatureSummary! +} + +"""System metrics including CPU and memory utilization""" +type Metrics implements Node { + id: PrefixedID! + + """Current CPU utilization metrics""" + cpu: CpuUtilization + + """Current memory utilization metrics""" + memory: MemoryUtilization + + """Temperature metrics""" + temperature: TemperatureMetrics +} + +type SensorConfig { + enabled: Boolean +} + +type LmSensorsConfig { + enabled: Boolean + config_path: String +} + +type IpmiConfig { + enabled: Boolean + args: [String!] +} + +type TemperatureSensorsConfig { + lm_sensors: LmSensorsConfig + smartctl: SensorConfig + ipmi: IpmiConfig +} + +type TemperatureThresholdsConfig { + cpu_warning: Int + cpu_critical: Int + disk_warning: Int + disk_critical: Int + warning: Int + critical: Int +} + +type TemperatureHistoryConfig { + max_readings: Int + retention_ms: Int +} + +type Owner { + username: String! + url: String! + avatar: String! +} + +type ProfileModel implements Node { + id: PrefixedID! + username: String! + url: String! + avatar: String! +} + +type Server implements Node { + id: PrefixedID! + owner: ProfileModel! + guid: String! + apikey: String! + name: String! + + """Server description/comment""" + comment: String + + """Whether this server is online or offline""" + status: ServerStatus! + wanip: String! + lanip: String! + localurl: String! + remoteurl: String! +} + +enum ServerStatus { + ONLINE + OFFLINE + NEVER_CONNECTED +} + +type ApiConfig { + version: String! + extraOrigins: [String!]! + sandbox: Boolean + ssoSubIds: [String!]! + plugins: [String!]! +} + +type OidcAuthorizationRule { + """The claim to check (e.g., email, sub, groups, hd)""" + claim: String! + + """The comparison operator""" + operator: AuthorizationOperator! + + """The value(s) to match against""" + value: [String!]! +} + +"""Operators for authorization rule matching""" +enum AuthorizationOperator { + EQUALS + CONTAINS + ENDS_WITH + STARTS_WITH +} + +type OidcProvider { + """The unique identifier for the OIDC provider""" + id: PrefixedID! + + """Display name of the OIDC provider""" + name: String! + + """OAuth2 client ID registered with the provider""" + clientId: String! + + """OAuth2 client secret (if required by provider)""" + clientSecret: String + + """ + OIDC issuer URL (e.g., https://accounts.google.com). Required for auto-discovery via /.well-known/openid-configuration + """ + issuer: String + + """ + OAuth2 authorization endpoint URL. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + authorizationEndpoint: String + + """ + OAuth2 token endpoint URL. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + tokenEndpoint: String + + """ + JSON Web Key Set URI for token validation. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + jwksUri: String + + """OAuth2 scopes to request (e.g., openid, profile, email)""" + scopes: [String!]! + + """Flexible authorization rules based on claims""" + authorizationRules: [OidcAuthorizationRule!] + + """ + Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass). Defaults to OR. + """ + authorizationRuleMode: AuthorizationRuleMode + + """Custom text for the login button""" + buttonText: String + + """URL or base64 encoded icon for the login button""" + buttonIcon: String + + """ + Button variant style from Reka UI. See https://reka-ui.com/docs/components/button + """ + buttonVariant: String + + """ + Custom CSS styles for the button (e.g., "background: linear-gradient(to right, #4f46e5, #7c3aed); border-radius: 9999px;") + """ + buttonStyle: String +} + +""" +Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass) +""" +enum AuthorizationRuleMode { + OR + AND +} + +type OidcConfiguration { + """List of configured OIDC providers""" + providers: [OidcProvider!]! + + """ + Default allowed redirect origins that apply to all OIDC providers (e.g., Tailscale domains) + """ + defaultAllowedOrigins: [String!] +} + +type OidcSessionValidation { + valid: Boolean! + username: String +} + +type PublicOidcProvider { + id: ID! + name: String! + buttonText: String + buttonIcon: String + buttonVariant: String + buttonStyle: String +} + +"""System time configuration and current status""" +type SystemTime { + """Current server time in ISO-8601 format (UTC)""" + currentTime: String! + + """IANA timezone identifier currently in use""" + timeZone: String! + + """Whether NTP/PTP time synchronization is enabled""" + useNtp: Boolean! + + """Configured NTP servers (empty strings indicate unused slots)""" + ntpServers: [String!]! +} + +"""Selectable timezone option from the system list""" +type TimeZoneOption { + """IANA timezone identifier""" + value: String! + + """Display label for the timezone""" + label: String! +} + +type UPSBattery { + """ + Battery charge level as a percentage (0-100). Unit: percent (%). Example: 100 means battery is fully charged + """ + chargeLevel: Int! + + """ + Estimated runtime remaining on battery power. Unit: seconds. Example: 3600 means 1 hour of runtime remaining + """ + estimatedRuntime: Int! + + """ + Battery health status. Possible values: 'Good', 'Replace', 'Unknown'. Indicates if the battery needs replacement + """ + health: String! +} + +type UPSPower { + """ + Input voltage from the wall outlet/mains power. Unit: volts (V). Example: 120.5 for typical US household voltage + """ + inputVoltage: Float! + + """ + Output voltage being delivered to connected devices. Unit: volts (V). Example: 120.5 - should match input voltage when on mains power + """ + outputVoltage: Float! + + """ + Current load on the UPS as a percentage of its capacity. Unit: percent (%). Example: 25 means UPS is loaded at 25% of its maximum capacity + """ + loadPercentage: Int! + + """ + Nominal power capacity of the UPS. Unit: watts (W). Example: 1000 means the UPS is rated for 1000 watts. This is the maximum power the UPS can deliver + """ + nominalPower: Int + + """ + Current power consumption calculated from load percentage and nominal power. Unit: watts (W). Example: 350 means 350 watts currently being used. Calculated as: nominalPower * (loadPercentage / 100) + """ + currentPower: Float +} + +type UPSDevice { + """ + Unique identifier for the UPS device. Usually based on the model name or a generated ID + """ + id: ID! + + """Display name for the UPS device. Can be customized by the user""" + name: String! + + """UPS model name/number. Example: 'APC Back-UPS Pro 1500'""" + model: String! + + """ + Current operational status of the UPS. Common values: 'Online', 'On Battery', 'Low Battery', 'Replace Battery', 'Overload', 'Offline'. 'Online' means running on mains power, 'On Battery' means running on battery backup + """ + status: String! + + """Battery-related information""" + battery: UPSBattery! + + """Power-related information""" + power: UPSPower! +} + +type UPSConfiguration { + """ + UPS service state. Values: 'enable' or 'disable'. Controls whether the UPS monitoring service is running + """ + service: String + + """ + Type of cable connecting the UPS to the server. Common values: 'usb', 'smart', 'ether', 'custom'. Determines communication protocol + """ + upsCable: String + + """ + Custom cable configuration string. Only used when upsCable is set to 'custom'. Format depends on specific UPS model + """ + customUpsCable: String + + """ + UPS communication type. Common values: 'usb', 'net', 'snmp', 'dumb', 'pcnet', 'modbus'. Defines how the server communicates with the UPS + """ + upsType: String + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network. Depends on upsType setting + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: volt-amperes (VA). Example: 1500 for a 1500VA UPS. Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level threshold for shutdown. Unit: percent (%). Example: 10 means shutdown when battery reaches 10%. System will shutdown when battery drops to this level + """ + batteryLevel: Int + + """ + Runtime threshold for shutdown. Unit: minutes. Example: 5 means shutdown when 5 minutes runtime remaining. System will shutdown when estimated runtime drops below this + """ + minutes: Int + + """ + Timeout for UPS communications. Unit: seconds. Example: 0 means no timeout. Time to wait for UPS response before considering it offline + """ + timeout: Int + + """ + Kill UPS power after shutdown. Values: 'yes' or 'no'. If 'yes', tells UPS to cut power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: String + + """ + Network Information Server (NIS) IP address. Default: '0.0.0.0' (listen on all interfaces). IP address for apcupsd network information server + """ + nisIp: String + + """ + Network server mode. Values: 'on' or 'off'. Enable to allow network clients to monitor this UPS + """ + netServer: String + + """ + UPS name for network monitoring. Used to identify this UPS on the network. Example: 'SERVER_UPS' + """ + upsName: String + + """ + Override UPS model name. Used for display purposes. Leave unset to use UPS-reported model + """ + modelName: String +} + +type VmDomain implements Node { + """The unique identifier for the vm (uuid)""" + id: PrefixedID! + + """A friendly name for the vm""" + name: String + + """Current domain vm state""" + state: VmState! + + """The UUID of the vm""" + uuid: String @deprecated(reason: "Use id instead") +} + +"""The state of a virtual machine""" +enum VmState { + NOSTATE + RUNNING + IDLE + PAUSED + SHUTDOWN + SHUTOFF + CRASHED + PMSUSPENDED +} + +type Vms implements Node { + id: PrefixedID! + domains: [VmDomain!] + domain: [VmDomain!] +} + +type Uptime { + timestamp: String +} + +type Service implements Node { + id: PrefixedID! + name: String + online: Boolean + uptime: Uptime + version: String } type UserAccount implements Node { id: PrefixedID! + + """The name of the user""" name: String! + + """A description of the user""" description: String! + + """The roles of the user""" roles: [Role!]! + + """The permissions of the user""" permissions: [Permission!] } -type RCloneRemote { +type Plugin { + """The name of the plugin package""" name: String! - type: String! - parameters: JSON! - config: JSON! + + """The version of the plugin package""" + version: String! + + """Whether the plugin has an API module""" + hasApiModule: Boolean + + """Whether the plugin has a CLI module""" + hasCliModule: Boolean } -type RCloneDrive { - name: String! - options: JSON! +type AccessUrl { + type: URL_TYPE! + name: String + ipv4: URL + ipv6: URL } -type RCloneBackupConfigForm { - id: ID! - dataSchema: JSON! - uiSchema: JSON! +enum URL_TYPE { + LAN + WIREGUARD + WAN + MDNS + OTHER + DEFAULT } -type RCloneBackupSettings { - remotes: [RCloneRemote!]! - drives: [RCloneDrive!]! - configForm(formOptions: RCloneConfigFormInput): RCloneBackupConfigForm! +""" +A field whose value conforms to the standard URL format as specified in RFC3986: https://www.ietf.org/rfc/rfc3986.txt. +""" +scalar URL + +type AccessUrlObject { + ipv4: String + ipv6: String + type: URL_TYPE! + name: String } -type RCloneMutations { - createRCloneRemote(input: CreateRCloneRemoteInput!): RCloneRemote! - deleteRCloneRemote(input: DeleteRCloneRemoteInput!): Boolean! +type ApiKeyResponse { + valid: Boolean! + error: String } -type Theme { - name: ThemeName! - headerBackgroundColor: String - headerPrimaryTextColor: String - headerSecondaryTextColor: String - showBannerGradient: Boolean! - showBannerImage: Boolean! - showHeaderDescription: Boolean! +type MinigraphqlResponse { + status: MinigraphStatus! + timeout: Int + error: String } -type FlashBackupStatus { - jobId: String +"""The status of the minigraph""" +enum MinigraphStatus { + PRE_INIT + CONNECTING + CONNECTED + PING_FAILURE + ERROR_RETRYING +} + +type CloudResponse { status: String! + ip: String + error: String } -type UpdateSettingsResponse { - restartRequired: Boolean! - values: JSON! - warnings: [String!] +type RelayResponse { + status: String! + timeout: String + error: String } -# ============================================================================ -# Root Query Type -# ============================================================================ +type Cloud { + error: String + apiKey: ApiKeyResponse! + relay: RelayResponse + minigraphql: MinigraphqlResponse! + cloud: CloudResponse! + allowedOrigins: [String!]! +} + +type RemoteAccess { + """The type of WAN access used for Remote Access""" + accessType: WAN_ACCESS_TYPE! + + """The type of port forwarding used for Remote Access""" + forwardType: WAN_FORWARD_TYPE + + """The port used for Remote Access""" + port: Int +} + +enum WAN_ACCESS_TYPE { + DYNAMIC + ALWAYS + DISABLED +} + +enum WAN_FORWARD_TYPE { + UPNP + STATIC +} + +type DynamicRemoteAccessStatus { + """The type of dynamic remote access that is enabled""" + enabledType: DynamicRemoteAccessType! + + """The type of dynamic remote access that is currently running""" + runningType: DynamicRemoteAccessType! + + """Any error message associated with the dynamic remote access""" + error: String +} + +enum DynamicRemoteAccessType { + STATIC + UPNP + DISABLED +} + +type ConnectSettingsValues { + """The type of WAN access used for Remote Access""" + accessType: WAN_ACCESS_TYPE! + + """The type of port forwarding used for Remote Access""" + forwardType: WAN_FORWARD_TYPE + + """The port used for Remote Access""" + port: Int +} + +type ConnectSettings implements Node { + id: PrefixedID! + + """The data schema for the Connect settings""" + dataSchema: JSON! + + """The UI schema for the Connect settings""" + uiSchema: JSON! + + """The values for the Connect settings""" + values: ConnectSettingsValues! +} + +type Connect implements Node { + id: PrefixedID! + + """The status of dynamic remote access""" + dynamicRemoteAccess: DynamicRemoteAccessStatus! + + """The settings for the Connect instance""" + settings: ConnectSettings! +} + +type Network implements Node { + id: PrefixedID! + accessUrls: [AccessUrl!] +} + +input AccessUrlObjectInput { + ipv4: String + ipv6: String + type: URL_TYPE! + name: String +} + +"\n### Description:\n\nID scalar type that prefixes the underlying ID with the server identifier on output and strips it on input.\n\nWe use this scalar type to ensure that the ID is unique across all servers, allowing the same underlying resource ID to be used across different server instances.\n\n#### Input Behavior:\n\nWhen providing an ID as input (e.g., in arguments or input objects), the server identifier prefix (':') is optional.\n\n- If the prefix is present (e.g., '123:456'), it will be automatically stripped, and only the underlying ID ('456') will be used internally.\n- If the prefix is absent (e.g., '456'), the ID will be used as-is.\n\nThis makes it flexible for clients, as they don't strictly need to know or provide the server ID.\n\n#### Output Behavior:\n\nWhen an ID is returned in the response (output), it will *always* be prefixed with the current server's unique identifier (e.g., '123:456').\n\n#### Example:\n\nNote: The server identifier is '123' in this example.\n\n##### Input (Prefix Optional):\n```graphql\n# Both of these are valid inputs resolving to internal ID '456'\n{\n someQuery(id: \"123:456\") { ... }\n anotherQuery(id: \"456\") { ... }\n}\n```\n\n##### Output (Prefix Always Added):\n```graphql\n# Assuming internal ID is '456'\n{\n \"data\": {\n \"someResource\": {\n \"id\": \"123:456\" \n }\n }\n}\n```\n " +scalar PrefixedID + type Query { - # Array - array: UnraidArray! - parityHistory: [ParityCheck!]! - - # Config - config: Config! - - # Disks - disk(id: PrefixedID!): Disk! - disks: [Disk!]! - - # Docker - docker: Docker! - dockerNetwork(id: PrefixedID!): DockerNetwork - dockerNetworks: [DockerNetwork!] - - # Flash - flash: Flash! - - # Info - info: Info! - - # Logs - logFile(path: String!, lines: Int, startLine: Int): LogFileContent! - logFiles: [LogFile!]! - - # Metrics - metrics: Metrics! - - # Notifications - notifications: Notifications! - - # Online - online: Boolean! - - # Owner - owner: Owner! - - # API Keys - apiKey(id: PrefixedID!): ApiKey apiKeys: [ApiKey!]! + apiKey(id: PrefixedID!): ApiKey - # RClone - rclone: RCloneBackupSettings! + """All possible roles for API keys""" + apiKeyPossibleRoles: [Role!]! - # Registration - registration: Registration + """All possible permissions for API keys""" + apiKeyPossiblePermissions: [Permission!]! - # Servers - server: Server - servers: [Server!]! + """Get the actual permissions that would be granted by a set of roles""" + getPermissionsForRoles(roles: [Role!]!): [Permission!]! - # Services - services: [Service!]! + """ + Preview the effective permissions for a combination of roles and explicit permissions + """ + previewEffectivePermissions(roles: [Role!], permissions: [AddPermissionInput!]): [Permission!]! - # Settings - settings: Settings! + """Get all available authentication actions with possession""" + getAvailableAuthActions: [AuthAction!]! - # Shares - shares: [Share!]! - - # Unassigned devices - unassignedDevices: [UnassignedDevice!] - - # UPS - upsConfiguration: UPSConfiguration! - upsDeviceById(id: PrefixedID!): UPSDevice - upsDevices: [UPSDevice!]! - - # User + """Get JSON Schema for API key creation form""" + getApiKeyCreationFormSchema: ApiKeyFormSettings! + config: Config! + display: InfoDisplay! + flash: Flash! me: UserAccount! - # Vars + """Get all notifications""" + notifications: Notifications! + online: Boolean! + owner: Owner! + registration: Registration + server: Server + servers: [Server!]! + services: [Service!]! + shares: [Share!]! vars: Vars! - # VMs + """Get information about all VMs on the system""" vms: Vms! + parityHistory: [ParityCheck!]! + array: UnraidArray! + customization: Customization - # Network (used by MCP tool) - network: Network + """Whether the system is a fresh install (no license key)""" + isFreshInstall: Boolean! + publicTheme: Theme! + info: Info! + docker: Docker! + disks: [Disk!]! + disk(id: PrefixedID!): Disk! + rclone: RCloneBackupSettings! + logFiles: [LogFile!]! + logFile(path: String!, lines: Int, startLine: Int): LogFileContent! + settings: Settings! + isSSOEnabled: Boolean! - # Connect (used by MCP tool) - connect: ConnectSettings + """Get public OIDC provider information for login buttons""" + publicOidcProviders: [PublicOidcProvider!]! + + """Get all configured OIDC providers (admin only)""" + oidcProviders: [OidcProvider!]! + + """Get a specific OIDC provider by ID""" + oidcProvider(id: PrefixedID!): OidcProvider + + """Get the full OIDC configuration (admin only)""" + oidcConfiguration: OidcConfiguration! + + """Validate an OIDC session token (internal use for CLI validation)""" + validateOidcSession(token: String!): OidcSessionValidation! + metrics: Metrics! + + """Retrieve current system time configuration""" + systemTime: SystemTime! + + """Retrieve available time zone options""" + timeZoneOptions: [TimeZoneOption!]! + upsDevices: [UPSDevice!]! + upsDeviceById(id: String!): UPSDevice + upsConfiguration: UPSConfiguration! + + """Retrieve a plugin installation operation by identifier""" + pluginInstallOperation(operationId: ID!): PluginInstallOperation + + """List all tracked plugin installation operations""" + pluginInstallOperations: [PluginInstallOperation!]! + + """List installed Unraid OS plugins by .plg filename""" + installedUnraidPlugins: [String!]! + + """List all installed plugins with their metadata""" + plugins: [Plugin!]! + remoteAccess: RemoteAccess! + connect: Connect! + network: Network! + cloud: Cloud! } -# ============================================================================ -# Root Mutation Type -# ============================================================================ type Mutation { - # Array - array: ArrayMutations! - - # Parity - parityCheck: ParityCheckMutations! - - # Docker - docker: DockerMutations! - - # Notifications (root-level) + """Creates a new notification record""" createNotification(input: NotificationData!): Notification! - archiveNotification(id: PrefixedID!): Notification! - archiveAll(importance: NotificationImportance): NotificationOverview! deleteNotification(id: PrefixedID!, type: NotificationType!): NotificationOverview! + + """Deletes all archived notifications on server.""" deleteArchivedNotifications: NotificationOverview! + + """Marks a notification as archived.""" + archiveNotification(id: PrefixedID!): Notification! + archiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + + """ + Creates a notification if an equivalent unread notification does not already exist. + """ + notifyIfUnique(input: NotificationData!): Notification + archiveAll(importance: NotificationImportance): NotificationOverview! + + """Marks a notification as unread.""" unreadNotification(id: PrefixedID!): Notification! - # Also accessible as nested (used by MCP tools) - notifications: Notifications! + unarchiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + unarchiveAll(importance: NotificationImportance): NotificationOverview! - # API Keys (root-level aliases used by keys.py) - createApiKey(input: CreateApiKeyInput!): ApiKey! - updateApiKey(input: UpdateApiKeyInput!): ApiKey! - deleteApiKeys(input: DeleteApiKeysInput!): Boolean! - # Nested API key mutations - apiKey: ApiKeyMutations! - - # RClone - rclone: RCloneMutations! - - # VM + """Reads each notification to recompute & update the overview.""" + recalculateOverview: NotificationOverview! + array: ArrayMutations! + docker: DockerMutations! vm: VmMutations! + parityCheck: ParityCheckMutations! + apiKey: ApiKeyMutations! + customization: CustomizationMutations! + rclone: RCloneMutations! + onboarding: OnboardingMutations! + unraidPlugins: UnraidPluginsMutations! - # Settings + """Update server name, comment, and model""" + updateServerIdentity(name: String!, comment: String, sysModel: String): Server! + updateSshSettings(input: UpdateSshInput!): Vars! + createDockerFolder(name: String!, parentId: String, childrenIds: [String!]): ResolvedOrganizerV1! + setDockerFolderChildren(folderId: String, childrenIds: [String!]!): ResolvedOrganizerV1! + deleteDockerEntries(entryIds: [String!]!): ResolvedOrganizerV1! + moveDockerEntriesToFolder(sourceEntryIds: [String!]!, destinationFolderId: String!): ResolvedOrganizerV1! + moveDockerItemsToPosition(sourceEntryIds: [String!]!, destinationFolderId: String!, position: Float!): ResolvedOrganizerV1! + renameDockerFolder(folderId: String!, newName: String!): ResolvedOrganizerV1! + createDockerFolderWithItems(name: String!, parentId: String, sourceEntryIds: [String!], position: Float): ResolvedOrganizerV1! + updateDockerViewPreferences(viewId: String = "default", prefs: JSON!): ResolvedOrganizerV1! + syncDockerTemplatePaths: DockerTemplateSyncResult! + + """ + Reset Docker template mappings to defaults. Use this to recover from corrupted state. + """ + resetDockerTemplateMappings: Boolean! + refreshDockerDigests: Boolean! + + """Initiates a flash drive backup using a configured remote.""" + initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus! updateSettings(input: JSON!): UpdateSettingsResponse! + updateTemperatureConfig(input: TemperatureConfigInput!): Boolean! - # UPS + """Update system time configuration""" + updateSystemTime(input: UpdateSystemTimeInput!): SystemTime! configureUps(config: UPSConfigInput!): Boolean! + + """ + Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + addPlugin(input: PluginManagementInput!): Boolean! + + """ + Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + removePlugin(input: PluginManagementInput!): Boolean! + updateApiSettings(input: ConnectSettingsInput!): ConnectSettingsValues! + connectSignIn(input: ConnectSignInInput!): Boolean! + connectSignOut: Boolean! + setupRemoteAccess(input: SetupRemoteAccessInput!): Boolean! + enableDynamicRemoteAccess(input: EnableDynamicRemoteAccessInput!): Boolean! +} + +input NotificationData { + title: String! + subject: String! + description: String! + importance: NotificationImportance! + link: String +} + +input UpdateSshInput { + enabled: Boolean! + + """SSH Port (default 22)""" + port: Int! +} + +input InitiateFlashBackupInput { + """The name of the remote configuration to use for the backup.""" + remoteName: String! + + """Source path to backup (typically the flash drive).""" + sourcePath: String! + + """Destination path on the remote.""" + destinationPath: String! + + """ + Additional options for the backup operation, such as --dry-run or --transfers. + """ + options: JSON +} + +input TemperatureConfigInput { + enabled: Boolean + polling_interval: Int + default_unit: TemperatureUnit + sensors: TemperatureSensorsConfigInput + thresholds: TemperatureThresholdsConfigInput + history: TemperatureHistoryConfigInput +} + +input TemperatureSensorsConfigInput { + lm_sensors: LmSensorsConfigInput + smartctl: SensorConfigInput + ipmi: IpmiConfigInput +} + +input LmSensorsConfigInput { + enabled: Boolean + config_path: String +} + +input SensorConfigInput { + enabled: Boolean +} + +input IpmiConfigInput { + enabled: Boolean + args: [String!] +} + +input TemperatureThresholdsConfigInput { + cpu_warning: Int + cpu_critical: Int + disk_warning: Int + disk_critical: Int + warning: Int + critical: Int +} + +input TemperatureHistoryConfigInput { + max_readings: Int + retention_ms: Int +} + +input UpdateSystemTimeInput { + """New IANA timezone identifier to apply""" + timeZone: String + + """Enable or disable NTP-based synchronization""" + useNtp: Boolean + + """ + Ordered list of up to four NTP servers. Supply empty strings to clear positions. + """ + ntpServers: [String!] + + """ + Manual date/time to apply when disabling NTP, expected format YYYY-MM-DD HH:mm:ss + """ + manualDateTime: String +} + +input UPSConfigInput { + """Enable or disable the UPS monitoring service""" + service: UPSServiceState + + """Type of cable connecting the UPS to the server""" + upsCable: UPSCableType + + """ + Custom cable configuration (only used when upsCable is CUSTOM). Format depends on specific UPS model + """ + customUpsCable: String + + """UPS communication protocol""" + upsType: UPSType + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: watts (W). Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level percentage to initiate shutdown. Unit: percent (%) - Valid range: 0-100 + """ + batteryLevel: Int + + """Runtime left in minutes to initiate shutdown. Unit: minutes""" + minutes: Int + + """ + Time on battery before shutdown. Unit: seconds. Set to 0 to disable timeout-based shutdown + """ + timeout: Int + + """ + Turn off UPS power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: UPSKillPower +} + +"""Service state for UPS daemon""" +enum UPSServiceState { + ENABLE + DISABLE +} + +"""UPS cable connection types""" +enum UPSCableType { + USB + SIMPLE + SMART + ETHER + CUSTOM +} + +"""UPS communication protocols""" +enum UPSType { + USB + APCSMART + NET + SNMP + DUMB + PCNET + MODBUS +} + +"""Kill UPS power after shutdown option""" +enum UPSKillPower { + YES + NO +} + +input PluginManagementInput { + """Array of plugin package names to add or remove""" + names: [String!]! + + """ + Whether to treat plugins as bundled plugins. Bundled plugins are installed to node_modules at build time and controlled via config only. + """ + bundled: Boolean! = false + + """ + Whether to restart the API after the operation. When false, a restart has already been queued. + """ + restart: Boolean! = true +} + +input ConnectSettingsInput { + """The type of WAN access to use for Remote Access""" + accessType: WAN_ACCESS_TYPE + + """The type of port forwarding to use for Remote Access""" + forwardType: WAN_FORWARD_TYPE + + """ + The port to use for Remote Access. Not required for UPNP forwardType. Required for STATIC forwardType. Ignored if accessType is DISABLED or forwardType is UPNP. + """ + port: Int +} + +input ConnectSignInInput { + """The API key for authentication""" + apiKey: String! + + """User information for the sign-in""" + userInfo: ConnectUserInfoInput +} + +input ConnectUserInfoInput { + """The preferred username of the user""" + preferred_username: String! + + """The email address of the user""" + email: String! + + """The avatar URL of the user""" + avatar: String +} + +input SetupRemoteAccessInput { + """The type of WAN access to use for Remote Access""" + accessType: WAN_ACCESS_TYPE! + + """The type of port forwarding to use for Remote Access""" + forwardType: WAN_FORWARD_TYPE + + """ + The port to use for Remote Access. Not required for UPNP forwardType. Required for STATIC forwardType. Ignored if accessType is DISABLED or forwardType is UPNP. + """ + port: Int +} + +input EnableDynamicRemoteAccessInput { + """The AccessURL Input for dynamic remote access""" + url: AccessUrlInput! + + """Whether to enable or disable dynamic remote access""" + enabled: Boolean! +} + +input AccessUrlInput { + type: URL_TYPE! + name: String + ipv4: URL + ipv6: URL } -# ============================================================================ -# Root Subscription Type -# ============================================================================ type Subscription { - arraySubscription: UnraidArray! - logFile(path: String!): LogFileContent! + displaySubscription: InfoDisplay! notificationAdded: Notification! notificationsOverview: NotificationOverview! + notificationsWarningsAndAlerts: [Notification!]! ownerSubscription: Owner! - parityHistorySubscription: ParityCheck! serversSubscription: Server! + parityHistorySubscription: ParityCheck! + arraySubscription: UnraidArray! + dockerContainerStats: DockerContainerStats! + logFile(path: String!): LogFileContent! systemMetricsCpu: CpuUtilization! systemMetricsCpuTelemetry: CpuPackages! systemMetricsMemory: MemoryUtilization! + systemMetricsTemperature: TemperatureMetrics upsUpdates: UPSDevice! + pluginInstallUpdates(operationId: ID!): PluginInstallEvent! } diff --git a/pyproject.toml b/pyproject.toml index 0515555..ad129aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ build-backend = "hatchling.build" # ============================================================================ [project] name = "unraid-mcp" -version = "0.2.0" +version = "0.2.1" description = "MCP Server for Unraid API - provides tools to interact with an Unraid server's GraphQL API" readme = "README.md" license = {file = "LICENSE"} @@ -189,7 +189,7 @@ ignore = [ [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401", "D104"] -"tests/**/*.py" = ["D", "S101", "PLR2004"] # Allow asserts and magic values in tests +"tests/**/*.py" = ["D", "S101", "S105", "S106", "S107", "PLR2004"] # Allow test-only patterns [tool.ruff.lint.pydocstyle] convention = "google" diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index e588c77..c183788 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -659,9 +659,10 @@ class TestArrayToolRequests: return_value=_graphql_response({"parityCheck": {"start": True}}) ) tool = self._get_tool() - result = await tool(action="parity_start") + result = await tool(action="parity_start", correct=False) body = _extract_request_body(route.calls.last.request) assert "StartParityCheck" in body["query"] + assert body["variables"] == {"correct": False} assert result["success"] is True @respx.mock @@ -858,9 +859,9 @@ class TestNotificationsToolRequests: async def test_create_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"notifications": {"createNotification": { + {"createNotification": { "id": "n1", "title": "Test", "importance": "INFO", - }}} + }} ) ) tool = self._get_tool() @@ -882,7 +883,7 @@ class TestNotificationsToolRequests: async def test_archive_sends_id_variable(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"notifications": {"archiveNotification": True}} + {"archiveNotification": {"id": "notif-1"}} ) ) tool = self._get_tool() @@ -901,7 +902,7 @@ class TestNotificationsToolRequests: async def test_delete_sends_id_and_type(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"notifications": {"deleteNotification": True}} + {"deleteNotification": {"unread": {"total": 0}}} ) ) tool = self._get_tool() @@ -920,7 +921,7 @@ class TestNotificationsToolRequests: async def test_archive_all_sends_importance_when_provided(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"notifications": {"archiveAll": True}} + {"archiveAll": {"archive": {"total": 1}}} ) ) tool = self._get_tool() @@ -1087,10 +1088,10 @@ class TestKeysToolRequests: async def test_create_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"createApiKey": { + {"apiKey": {"create": { "id": "k2", "name": "new-key", "key": "secret", "roles": ["read"], - }} + }}} ) ) tool = self._get_tool() @@ -1106,7 +1107,7 @@ class TestKeysToolRequests: async def test_update_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"updateApiKey": {"id": "k1", "name": "renamed", "roles": ["admin"]}} + {"apiKey": {"update": {"id": "k1", "name": "renamed", "roles": ["admin"]}}} ) ) tool = self._get_tool() @@ -1126,12 +1127,12 @@ class TestKeysToolRequests: @respx.mock async def test_delete_sends_ids_when_confirmed(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response({"deleteApiKeys": True}) + return_value=_graphql_response({"apiKey": {"delete": True}}) ) tool = self._get_tool() result = await tool(action="delete", key_id="k1", confirm=True) body = _extract_request_body(route.calls.last.request) - assert "DeleteApiKeys" in body["query"] + assert "DeleteApiKey" in body["query"] assert body["variables"]["input"]["ids"] == ["k1"] assert result["success"] is True diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py index 1512906..4432bfe 100644 --- a/tests/safety/test_destructive_guards.py +++ b/tests/safety/test_destructive_guards.py @@ -305,7 +305,7 @@ class TestConfirmAllowsExecution: assert result["success"] is True async def test_notifications_delete_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: - _mock_notif_graphql.return_value = {"notifications": {"deleteNotification": True}} + _mock_notif_graphql.return_value = {"deleteNotification": {"unread": {"total": 0}}} tool_fn = make_tool_fn( "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" ) @@ -318,7 +318,7 @@ class TestConfirmAllowsExecution: assert result["success"] is True async def test_notifications_delete_archived_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: - _mock_notif_graphql.return_value = {"notifications": {"deleteArchivedNotifications": True}} + _mock_notif_graphql.return_value = {"deleteArchivedNotifications": {"archive": {"total": 0}}} tool_fn = make_tool_fn( "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" ) @@ -332,7 +332,7 @@ class TestConfirmAllowsExecution: assert result["success"] is True async def test_keys_delete_with_confirm(self, _mock_keys_graphql: AsyncMock) -> None: - _mock_keys_graphql.return_value = {"deleteApiKeys": True} + _mock_keys_graphql.return_value = {"apiKey": {"delete": True}} tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys") result = await tool_fn(action="delete", key_id="key-123", confirm=True) assert result["success"] is True diff --git a/tests/test_array.py b/tests/test_array.py index 8717d78..9837022 100644 --- a/tests/test_array.py +++ b/tests/test_array.py @@ -39,12 +39,17 @@ class TestArrayValidation: with pytest.raises(ToolError, match="Invalid action"): await tool_fn(action=action) + async def test_parity_start_requires_correct(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="correct is required"): + await tool_fn(action="parity_start") + class TestArrayActions: async def test_parity_start(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"parityCheck": {"start": True}} tool_fn = _make_tool() - result = await tool_fn(action="parity_start") + result = await tool_fn(action="parity_start", correct=False) assert result["success"] is True assert result["action"] == "parity_start" _mock_graphql.assert_called_once() @@ -94,14 +99,14 @@ class TestArrayMutationFailures: async def test_parity_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"parityCheck": {"start": False}} tool_fn = _make_tool() - result = await tool_fn(action="parity_start") + result = await tool_fn(action="parity_start", correct=False) assert result["success"] is True assert result["data"] == {"parityCheck": {"start": False}} async def test_parity_start_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"parityCheck": {"start": None}} tool_fn = _make_tool() - result = await tool_fn(action="parity_start") + result = await tool_fn(action="parity_start", correct=False) assert result["success"] is True assert result["data"] == {"parityCheck": {"start": None}} @@ -110,7 +115,7 @@ class TestArrayMutationFailures: ) -> None: _mock_graphql.return_value = {"parityCheck": {"start": {}}} tool_fn = _make_tool() - result = await tool_fn(action="parity_start") + result = await tool_fn(action="parity_start", correct=False) assert result["success"] is True assert result["data"] == {"parityCheck": {"start": {}}} @@ -128,7 +133,7 @@ class TestArrayNetworkErrors: _mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error") tool_fn = _make_tool() with pytest.raises(ToolError, match="HTTP error 500"): - await tool_fn(action="parity_start") + await tool_fn(action="parity_start", correct=False) async def test_connection_refused(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.side_effect = ToolError("Network connection error: Connection refused") diff --git a/tests/test_keys.py b/tests/test_keys.py index 2236fbe..28b23e4 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -65,7 +65,9 @@ class TestKeysActions: async def test_create(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "createApiKey": {"id": "k:new", "name": "new-key", "key": "secret123", "roles": []} + "apiKey": { + "create": {"id": "k:new", "name": "new-key", "key": "secret123", "roles": []} + } } tool_fn = _make_tool() result = await tool_fn(action="create", name="new-key") @@ -74,11 +76,13 @@ class TestKeysActions: async def test_create_with_roles(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "createApiKey": { - "id": "k:new", - "name": "admin-key", - "key": "secret", - "roles": ["admin"], + "apiKey": { + "create": { + "id": "k:new", + "name": "admin-key", + "key": "secret", + "roles": ["admin"], + } } } tool_fn = _make_tool() @@ -86,13 +90,15 @@ class TestKeysActions: assert result["success"] is True async def test_update(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"updateApiKey": {"id": "k:1", "name": "renamed", "roles": []}} + _mock_graphql.return_value = { + "apiKey": {"update": {"id": "k:1", "name": "renamed", "roles": []}} + } tool_fn = _make_tool() result = await tool_fn(action="update", key_id="k:1", name="renamed") assert result["success"] is True async def test_delete(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"deleteApiKeys": True} + _mock_graphql.return_value = {"apiKey": {"delete": True}} tool_fn = _make_tool() result = await tool_fn(action="delete", key_id="k:1", confirm=True) assert result["success"] is True diff --git a/tests/test_notifications.py b/tests/test_notifications.py index 40dae42..890d5fd 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -82,9 +82,7 @@ class TestNotificationsActions: async def test_create(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "notifications": { - "createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"} - } + "createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"} } tool_fn = _make_tool() result = await tool_fn( @@ -97,13 +95,13 @@ class TestNotificationsActions: assert result["success"] is True async def test_archive_notification(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"notifications": {"archiveNotification": True}} + _mock_graphql.return_value = {"archiveNotification": {"id": "n:1"}} tool_fn = _make_tool() result = await tool_fn(action="archive", notification_id="n:1") assert result["success"] is True async def test_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"notifications": {"deleteNotification": True}} + _mock_graphql.return_value = {"deleteNotification": {"unread": {"total": 0}}} tool_fn = _make_tool() result = await tool_fn( action="delete", @@ -114,13 +112,13 @@ class TestNotificationsActions: assert result["success"] is True async def test_archive_all(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"notifications": {"archiveAll": True}} + _mock_graphql.return_value = {"archiveAll": {"archive": {"total": 1}}} tool_fn = _make_tool() result = await tool_fn(action="archive_all") assert result["success"] is True async def test_unread_notification(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"notifications": {"unreadNotification": True}} + _mock_graphql.return_value = {"unreadNotification": {"id": "n:1"}} tool_fn = _make_tool() result = await tool_fn(action="unread", notification_id="n:1") assert result["success"] is True @@ -140,7 +138,7 @@ class TestNotificationsActions: assert filter_var["offset"] == 5 async def test_delete_archived(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"notifications": {"deleteArchivedNotifications": True}} + _mock_graphql.return_value = {"deleteArchivedNotifications": {"archive": {"total": 0}}} tool_fn = _make_tool() result = await tool_fn(action="delete_archived", confirm=True) assert result["success"] is True @@ -180,9 +178,7 @@ class TestNotificationsCreateValidation: ) async def test_alert_importance_accepted(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = { - "notifications": {"createNotification": {"id": "n:1", "importance": "ALERT"}} - } + _mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "ALERT"}} tool_fn = _make_tool() result = await tool_fn( action="create", title="T", subject="S", description="D", importance="alert" @@ -223,9 +219,7 @@ class TestNotificationsCreateValidation: ) async def test_title_at_max_accepted(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = { - "notifications": {"createNotification": {"id": "n:1", "importance": "NORMAL"}} - } + _mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "NORMAL"}} tool_fn = _make_tool() result = await tool_fn( action="create", diff --git a/unraid_mcp/tools/array.py b/unraid_mcp/tools/array.py index 85fe93b..b712849 100644 --- a/unraid_mcp/tools/array.py +++ b/unraid_mcp/tools/array.py @@ -22,7 +22,7 @@ QUERIES: dict[str, str] = { MUTATIONS: dict[str, str] = { "parity_start": """ - mutation StartParityCheck($correct: Boolean) { + mutation StartParityCheck($correct: Boolean!) { parityCheck { start(correct: $correct) } } """, @@ -92,7 +92,9 @@ def register_array_tool(mcp: FastMCP) -> None: query = MUTATIONS[action] variables: dict[str, Any] | None = None - if action == "parity_start" and correct is not None: + if action == "parity_start": + if correct is None: + raise ToolError("correct is required for 'parity_start' action") variables = {"correct": correct} data = await make_graphql_request(query, variables) diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index 191c970..65dfeae 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -29,17 +29,17 @@ QUERIES: dict[str, str] = { MUTATIONS: dict[str, str] = { "create": """ mutation CreateApiKey($input: CreateApiKeyInput!) { - createApiKey(input: $input) { id name key roles } + apiKey { create(input: $input) { id name key roles } } } """, "update": """ mutation UpdateApiKey($input: UpdateApiKeyInput!) { - updateApiKey(input: $input) { id name roles } + apiKey { update(input: $input) { id name roles } } } """, "delete": """ - mutation DeleteApiKeys($input: DeleteApiKeysInput!) { - deleteApiKeys(input: $input) + mutation DeleteApiKey($input: DeleteApiKeyInput!) { + apiKey { delete(input: $input) } } """, } @@ -116,7 +116,7 @@ def register_keys_tool(mcp: FastMCP) -> None: data = await make_graphql_request(MUTATIONS["create"], {"input": input_data}) return { "success": True, - "key": data.get("createApiKey", {}), + "key": (data.get("apiKey") or {}).get("create", {}), } if action == "update": @@ -130,14 +130,14 @@ def register_keys_tool(mcp: FastMCP) -> None: data = await make_graphql_request(MUTATIONS["update"], {"input": input_data}) return { "success": True, - "key": data.get("updateApiKey", {}), + "key": (data.get("apiKey") or {}).get("update", {}), } if action == "delete": if not key_id: raise ToolError("key_id is required for 'delete' action") data = await make_graphql_request(MUTATIONS["delete"], {"input": {"ids": [key_id]}}) - result = data.get("deleteApiKeys") + result = (data.get("apiKey") or {}).get("delete") if not result: raise ToolError( f"Failed to delete API key '{key_id}': no confirmation from server" diff --git a/unraid_mcp/tools/notifications.py b/unraid_mcp/tools/notifications.py index 3053a13..a40eedb 100644 --- a/unraid_mcp/tools/notifications.py +++ b/unraid_mcp/tools/notifications.py @@ -44,33 +44,33 @@ QUERIES: dict[str, str] = { MUTATIONS: dict[str, str] = { "create": """ - mutation CreateNotification($input: CreateNotificationInput!) { - notifications { createNotification(input: $input) { id title importance } } + mutation CreateNotification($input: NotificationData!) { + createNotification(input: $input) { id title importance } } """, "archive": """ mutation ArchiveNotification($id: PrefixedID!) { - notifications { archiveNotification(id: $id) } + archiveNotification(id: $id) } """, "unread": """ mutation UnreadNotification($id: PrefixedID!) { - notifications { unreadNotification(id: $id) } + unreadNotification(id: $id) } """, "delete": """ mutation DeleteNotification($id: PrefixedID!, $type: NotificationType!) { - notifications { deleteNotification(id: $id, type: $type) } + deleteNotification(id: $id, type: $type) } """, "delete_archived": """ mutation DeleteArchivedNotifications { - notifications { deleteArchivedNotifications } + deleteArchivedNotifications } """, "archive_all": """ mutation ArchiveAllNotifications($importance: NotificationImportance) { - notifications { archiveAll(importance: $importance) } + archiveAll(importance: $importance) } """, } From 60defc35ca8eb4a997561a7521563348e19b7b4b Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Fri, 13 Mar 2026 01:54:55 -0400 Subject: [PATCH 07/34] feat: add 5 notification mutations + comprehensive refactors from PR review New notification actions (archive_many, create_unique, unarchive_many, unarchive_all, recalculate) bring unraid_notifications to 14 actions. Also includes continuation of CodeRabbit/PR review fixes: - Remove redundant try-except in virtualization.py (silent failure fix) - Add QueryCache protocol with get/put/invalidate_all to core/client.py - Refactor subscriptions (manager, diagnostics, resources, utils) - Update config (logging, settings) for improved structure - Expand test coverage: http_layer, safety guards, schema validation - Minor cleanups: array, docker, health, keys tools Co-authored-by: Claude --- docker-compose.yml | 1 + docs/research/feature-gap-analysis.md | 6 +- docs/research/unraid-api-crawl.md | 17 + .../2026-03-13-graphql-mutations-expansion.md | 1742 +++++++++++++++++ pyproject.toml | 2 +- tests/http_layer/test_request_construction.py | 381 ++-- tests/safety/test_destructive_guards.py | 36 +- tests/schema/test_query_validation.py | 49 +- tests/test_array.py | 3 + tests/test_notifications.py | 140 +- unraid_mcp/config/logging.py | 50 +- unraid_mcp/config/settings.py | 29 +- unraid_mcp/core/client.py | 56 +- unraid_mcp/main.py | 9 +- unraid_mcp/server.py | 18 +- unraid_mcp/subscriptions/diagnostics.py | 27 +- unraid_mcp/subscriptions/manager.py | 40 +- unraid_mcp/subscriptions/resources.py | 23 +- unraid_mcp/subscriptions/utils.py | 35 + unraid_mcp/tools/__init__.py | 6 +- unraid_mcp/tools/array.py | 2 +- unraid_mcp/tools/docker.py | 4 +- unraid_mcp/tools/health.py | 37 +- unraid_mcp/tools/keys.py | 24 +- unraid_mcp/tools/notifications.py | 118 +- unraid_mcp/tools/virtualization.py | 74 +- uv.lock | 2 +- 27 files changed, 2508 insertions(+), 423 deletions(-) create mode 100644 docs/superpowers/plans/2026-03-13-graphql-mutations-expansion.md diff --git a/docker-compose.yml b/docker-compose.yml index db6a9cb..0f305de 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,6 +11,7 @@ services: tmpfs: - /tmp:noexec,nosuid,size=64m - /app/logs:noexec,nosuid,size=16m + - /app/.cache/logs:noexec,nosuid,size=8m ports: # HostPort:ContainerPort (maps to UNRAID_MCP_PORT inside the container, default 6970) # Change the host port (left side) if 6970 is already in use on your host diff --git a/docs/research/feature-gap-analysis.md b/docs/research/feature-gap-analysis.md index 588354f..a65fbeb 100644 --- a/docs/research/feature-gap-analysis.md +++ b/docs/research/feature-gap-analysis.md @@ -420,8 +420,8 @@ GRAPHQL_PUBSUB_CHANNEL { | `CreateApiKeyInput` | `apiKey.create` | `name!`, `description`, `roles[]`, `permissions[]`, `overwrite` | | `AddPermissionInput` | `addPermission` | `resource!`, `actions![]` | | `AddRoleForUserInput` | `addRoleForUser` | User + role assignment | -| `AddRoleForApiKeyInput` | `apiKey.addRole` | API key + role assignment | -| `RemoveRoleFromApiKeyInput` | `apiKey.removeRole` | API key + role removal | +| `AddRoleForApiKeyInput` | `addRoleForApiKey` | API key + role assignment | +| `RemoveRoleFromApiKeyInput` | `removeRoleFromApiKey` | API key + role removal | | `arrayDiskInput` | `addDiskToArray`, `removeDiskFromArray` | Disk assignment data | | `ConnectSignInInput` | `connectSignIn` | Connect credentials | | `EnableDynamicRemoteAccessInput` | `enableDynamicRemoteAccess` | Remote access config | @@ -619,7 +619,7 @@ The current MCP server has 10 tools (76 actions) after consolidation. The follow |--------------|---------------|---------------| | `list_api_keys()` | `apiKeys` query | Key inventory | | `get_api_key(id)` | `apiKey(id)` query | Key details | -| `create_api_key(input)` | `apiKey.create` mutation | Key provisioning | +| `create_api_key(input)` | `apiKey.create` mutation | Key provisioning — **already implemented** in `unraid_keys` | | `delete_api_keys(input)` | `apiKey.delete` mutation | Key cleanup | | `update_api_key(input)` | `apiKey.update` mutation | Key modification | diff --git a/docs/research/unraid-api-crawl.md b/docs/research/unraid-api-crawl.md index e854afa..dbcd209 100644 --- a/docs/research/unraid-api-crawl.md +++ b/docs/research/unraid-api-crawl.md @@ -713,6 +713,23 @@ type Mutation { addUser(input: addUserInput!): User deleteUser(input: deleteUserInput!): User } + +type ApiKeyMutations { + """Create an API key""" + create(input: CreateApiKeyInput!): ApiKey! + + """Add a role to an API key""" + addRole(input: AddRoleForApiKeyInput!): Boolean! + + """Remove a role from an API key""" + removeRole(input: RemoveRoleFromApiKeyInput!): Boolean! + + """Delete one or more API keys""" + delete(input: DeleteApiKeyInput!): Boolean! + + """Update an API key""" + update(input: UpdateApiKeyInput!): ApiKey! +} ``` > **Note:** The client schema above uses `ID!` for disk mutation args (e.g., `mountArrayDisk(id: ID!)`), but the actual server resolvers use `PrefixedID!`. The MCP tool code correctly uses `PrefixedID!` based on server source analysis. diff --git a/docs/superpowers/plans/2026-03-13-graphql-mutations-expansion.md b/docs/superpowers/plans/2026-03-13-graphql-mutations-expansion.md new file mode 100644 index 0000000..faf9394 --- /dev/null +++ b/docs/superpowers/plans/2026-03-13-graphql-mutations-expansion.md @@ -0,0 +1,1742 @@ +# GraphQL Mutations Expansion Implementation Plan + +> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Implement all 28 missing GraphQL mutations across 11 files, bringing the server from 76 to 104 actions. + +**Architecture:** Follows the existing consolidated action pattern — QUERIES/MUTATIONS dicts, ALL_ACTIONS set, Literal type with sync-guard, and handler branches inside the registered tool function. Five existing tool files gain new mutations; one new tool file (`tools/settings.py`) is created for the 9 settings/connect mutations; `server.py` gets one new import and registration. + +**Tech Stack:** Python 3.12+, FastMCP, httpx, pytest, uv + +--- + +## File Structure + +| File | Change | New actions | +|---|---|---| +| `unraid_mcp/tools/notifications.py` | Modify | +5 mutations | +| `unraid_mcp/tools/storage.py` | Modify | +1 mutation | +| `unraid_mcp/tools/info.py` | Modify | +2 mutations | +| `unraid_mcp/tools/docker.py` | Modify | +11 mutations | +| `unraid_mcp/tools/settings.py` | **Create** | 9 mutations | +| `unraid_mcp/server.py` | Modify | register settings tool | +| `tests/test_notifications.py` | Modify | tests for 5 new actions | +| `tests/test_storage.py` | Modify | tests for flash_backup | +| `tests/test_info.py` | Modify | tests for update_server, update_ssh | +| `tests/test_docker.py` | Modify | tests for 11 organizer actions | +| `tests/test_settings.py` | **Create** | tests for all 9 settings actions | + +--- + +## Chunk 1: Notifications — 5 new mutations + +### Task 1: Test the 5 new notification mutations (RED) + +**Files:** +- Modify: `tests/test_notifications.py` + +- [ ] **Step 1: Append the new test class to `tests/test_notifications.py`** + +```python +class TestNewNotificationMutations: + async def test_archive_many_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "archiveNotifications": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 2, "warning": 0, "alert": 0, "total": 2}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="archive_many", notification_ids=["n:1", "n:2"]) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1] == {"ids": ["n:1", "n:2"]} + + async def test_archive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="notification_ids"): + await tool_fn(action="archive_many") + + async def test_create_unique_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "notifyIfUnique": {"id": "n:1", "title": "Test", "importance": "INFO"} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create_unique", + title="Test", + subject="Subj", + description="Desc", + importance="info", + ) + assert result["success"] is True + + async def test_create_unique_returns_none_when_duplicate( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"notifyIfUnique": None} + tool_fn = _make_tool() + result = await tool_fn( + action="create_unique", + title="T", + subject="S", + description="D", + importance="info", + ) + assert result["success"] is True + assert result["duplicate"] is True + + async def test_create_unique_requires_fields(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="requires title"): + await tool_fn(action="create_unique") + + async def test_unarchive_many_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "unarchiveNotifications": { + "unread": {"info": 2, "warning": 0, "alert": 0, "total": 2}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="unarchive_many", notification_ids=["n:1", "n:2"]) + assert result["success"] is True + + async def test_unarchive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="notification_ids"): + await tool_fn(action="unarchive_many") + + async def test_unarchive_all_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "unarchiveAll": { + "unread": {"info": 5, "warning": 1, "alert": 0, "total": 6}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="unarchive_all") + assert result["success"] is True + + async def test_unarchive_all_with_importance(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"unarchiveAll": {"unread": {"total": 1}, "archive": {"total": 0}}} + tool_fn = _make_tool() + await tool_fn(action="unarchive_all", importance="WARNING") + call_args = _mock_graphql.call_args + assert call_args[0][1] == {"importance": "WARNING"} + + async def test_recalculate_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "recalculateOverview": { + "unread": {"info": 3, "warning": 1, "alert": 0, "total": 4}, + "archive": {"info": 10, "warning": 0, "alert": 0, "total": 10}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="recalculate") + assert result["success"] is True +``` + +- [ ] **Step 2: Run tests to confirm they fail** + +```bash +cd /home/jmagar/workspace/unraid-mcp +uv run pytest tests/test_notifications.py::TestNewNotificationMutations -v 2>&1 | tail -20 +``` + +Expected: All 10 tests FAIL with `ToolError: Invalid action` or collection errors. + +--- + +### Task 2: Implement the 5 new notification mutations (GREEN) + +**Files:** +- Modify: `unraid_mcp/tools/notifications.py` + +- [ ] **Step 3: Add 5 entries to the MUTATIONS dict** (insert after the existing `"archive_all"` entry, before the closing `}`) + +```python + "archive_many": """ + mutation ArchiveNotifications($ids: [PrefixedID!]!) { + archiveNotifications(ids: $ids) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "create_unique": """ + mutation NotifyIfUnique($input: NotificationData!) { + notifyIfUnique(input: $input) { id title importance } + } + """, + "unarchive_many": """ + mutation UnarchiveNotifications($ids: [PrefixedID!]!) { + unarchiveNotifications(ids: $ids) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "unarchive_all": """ + mutation UnarchiveAll($importance: NotificationImportance) { + unarchiveAll(importance: $importance) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "recalculate": """ + mutation RecalculateOverview { + recalculateOverview { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, +``` + +- [ ] **Step 4: Update the NOTIFICATION_ACTIONS Literal and add `notification_ids` param** + +Replace the `NOTIFICATION_ACTIONS` Literal: + +```python +NOTIFICATION_ACTIONS = Literal[ + "overview", + "list", + "warnings", + "create", + "archive", + "unread", + "delete", + "delete_archived", + "archive_all", + "archive_many", + "create_unique", + "unarchive_many", + "unarchive_all", + "recalculate", +] +``` + +Add `notification_ids` to the tool function signature (after `description`): + +```python + notification_ids: list[str] | None = None, +``` + +Update the docstring to document the 5 new actions: + +``` + archive_many - Archive multiple notifications by ID (requires notification_ids) + create_unique - Create notification only if no equivalent unread exists (requires title, subject, description, importance) + unarchive_many - Move notifications back to unread (requires notification_ids) + unarchive_all - Move all archived notifications to unread (optional importance filter) + recalculate - Recompute overview counts from disk +``` + +- [ ] **Step 5: Add handler branches** (insert before the final `raise ToolError(...)` line) + +```python + if action == "archive_many": + if not notification_ids: + raise ToolError("notification_ids is required for 'archive_many' action") + data = await make_graphql_request(MUTATIONS["archive_many"], {"ids": notification_ids}) + return {"success": True, "action": "archive_many", "data": data} + + if action == "create_unique": + if title is None or subject is None or description is None or importance is None: + raise ToolError("create_unique requires title, subject, description, and importance") + if importance.upper() not in _VALID_IMPORTANCE: + raise ToolError( + f"importance must be one of: {', '.join(sorted(_VALID_IMPORTANCE))}. " + f"Got: '{importance}'" + ) + input_data = { + "title": title, + "subject": subject, + "description": description, + "importance": importance.upper(), + } + data = await make_graphql_request(MUTATIONS["create_unique"], {"input": input_data}) + notification = data.get("notifyIfUnique") + if notification is None: + return {"success": True, "duplicate": True, "data": None} + return {"success": True, "duplicate": False, "data": notification} + + if action == "unarchive_many": + if not notification_ids: + raise ToolError("notification_ids is required for 'unarchive_many' action") + data = await make_graphql_request(MUTATIONS["unarchive_many"], {"ids": notification_ids}) + return {"success": True, "action": "unarchive_many", "data": data} + + if action == "unarchive_all": + vars_: dict[str, Any] | None = None + if importance: + vars_ = {"importance": importance.upper()} + data = await make_graphql_request(MUTATIONS["unarchive_all"], vars_) + return {"success": True, "action": "unarchive_all", "data": data} + + if action == "recalculate": + data = await make_graphql_request(MUTATIONS["recalculate"]) + return {"success": True, "action": "recalculate", "data": data} +``` + +- [ ] **Step 6: Run tests — all must pass** + +```bash +uv run pytest tests/test_notifications.py -v 2>&1 | tail -30 +``` + +Expected: All tests PASS (original 24 + 10 new = 34 total). + +- [ ] **Step 7: Lint** + +```bash +uv run ruff check unraid_mcp/tools/notifications.py && uv run ruff format --check unraid_mcp/tools/notifications.py +``` + +Expected: No errors. + +- [ ] **Step 8: Commit** + +```bash +git add unraid_mcp/tools/notifications.py tests/test_notifications.py +git commit -m "feat: add 5 missing notification mutations (archive_many, create_unique, unarchive_many, unarchive_all, recalculate)" +``` + +--- + +## Chunk 2: Storage — flash_backup + Info — update_server/update_ssh + +### Task 3: Test flash_backup mutation (RED) + +**Files:** +- Modify: `tests/test_storage.py` + +- [ ] **Step 1: Append new test class to `tests/test_storage.py`** + +```python +class TestStorageFlashBackup: + async def test_flash_backup_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn( + action="flash_backup", + remote_name="myremote", + source_path="/boot", + destination_path="/backups/flash", + ) + + async def test_flash_backup_requires_remote_name(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="remote_name"): + await tool_fn(action="flash_backup", confirm=True) + + async def test_flash_backup_requires_source_path(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="source_path"): + await tool_fn(action="flash_backup", confirm=True, remote_name="r") + + async def test_flash_backup_requires_destination_path(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destination_path"): + await tool_fn(action="flash_backup", confirm=True, remote_name="r", source_path="/boot") + + async def test_flash_backup_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "initiateFlashBackup": {"status": "started", "jobId": "job-123"} + } + tool_fn = _make_tool() + result = await tool_fn( + action="flash_backup", + confirm=True, + remote_name="myremote", + source_path="/boot", + destination_path="/backups/flash", + ) + assert result["success"] is True + assert result["data"]["status"] == "started" + + async def test_flash_backup_passes_options(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": None}} + tool_fn = _make_tool() + await tool_fn( + action="flash_backup", + confirm=True, + remote_name="r", + source_path="/boot", + destination_path="/bak", + backup_options={"dry-run": True}, + ) + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["options"] == {"dry-run": True} +``` + +- [ ] **Step 2: Run tests to confirm they fail** + +```bash +uv run pytest tests/test_storage.py::TestStorageFlashBackup -v 2>&1 | tail -15 +``` + +Expected: All FAIL with `ToolError: Invalid action` or collection errors. + +--- + +### Task 4: Implement flash_backup in storage.py (GREEN) + +**Files:** +- Modify: `unraid_mcp/tools/storage.py` + +- [ ] **Step 3: Add MUTATIONS dict and DESTRUCTIVE_ACTIONS** (insert after the QUERIES dict closing `}`) + +```python +MUTATIONS: dict[str, str] = { + "flash_backup": """ + mutation InitiateFlashBackup($input: InitiateFlashBackupInput!) { + initiateFlashBackup(input: $input) { status jobId } + } + """, +} + +DESTRUCTIVE_ACTIONS = {"flash_backup"} +``` + +- [ ] **Step 4: Update ALL_ACTIONS, STORAGE_ACTIONS Literal, and sync guard** + +Replace: +```python +ALL_ACTIONS = set(QUERIES) +``` +With: +```python +ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) +``` + +Replace the STORAGE_ACTIONS Literal: +```python +STORAGE_ACTIONS = Literal[ + "shares", + "disks", + "disk_details", + "unassigned", + "log_files", + "logs", + "flash_backup", +] +``` + +- [ ] **Step 5: Add params and `confirm` to the tool function signature** + +Add after `tail_lines`: +```python + confirm: bool = False, + remote_name: str | None = None, + source_path: str | None = None, + destination_path: str | None = None, + backup_options: dict[str, Any] | None = None, +``` + +- [ ] **Step 6: Add validation and handler branch** (insert before the final `raise ToolError(...)`) + +Add destructive guard after the action validation: +```python + if action in DESTRUCTIVE_ACTIONS and not confirm: + raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") +``` + +Add handler: +```python + if action == "flash_backup": + if not remote_name: + raise ToolError("remote_name is required for 'flash_backup' action") + if not source_path: + raise ToolError("source_path is required for 'flash_backup' action") + if not destination_path: + raise ToolError("destination_path is required for 'flash_backup' action") + input_data: dict[str, Any] = { + "remoteName": remote_name, + "sourcePath": source_path, + "destinationPath": destination_path, + } + if backup_options is not None: + input_data["options"] = backup_options + data = await make_graphql_request(MUTATIONS["flash_backup"], {"input": input_data}) + return {"success": True, "action": "flash_backup", "data": data.get("initiateFlashBackup")} +``` + +Update the docstring to include: +``` + flash_backup - Initiate flash drive backup via rclone (requires remote_name, source_path, destination_path, confirm=True) +``` + +- [ ] **Step 7: Run all storage tests** + +```bash +uv run pytest tests/test_storage.py -v 2>&1 | tail -20 +``` + +Expected: All PASS. + +- [ ] **Step 8: Lint** + +```bash +uv run ruff check unraid_mcp/tools/storage.py && uv run ruff format --check unraid_mcp/tools/storage.py +``` + +- [ ] **Step 9: Commit** + +```bash +git add unraid_mcp/tools/storage.py tests/test_storage.py +git commit -m "feat: add flash_backup mutation to storage tool" +``` + +--- + +### Task 5: Test info mutations — update_server, update_ssh (RED) + +**Files:** +- Modify: `tests/test_info.py` + +- [ ] **Step 10: Append new test class to `tests/test_info.py`** + +```python +class TestInfoMutations: + async def test_update_server_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "updateServerIdentity": {"id": "srv:1", "name": "tootie", "comment": "main server", "status": "ONLINE"} + } + tool_fn = _make_tool() + result = await tool_fn(action="update_server", server_name="tootie", server_comment="main server") + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["name"] == "tootie" + assert call_args[0][1]["comment"] == "main server" + + async def test_update_server_requires_name(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="server_name"): + await tool_fn(action="update_server") + + async def test_update_server_with_sys_model(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"updateServerIdentity": {"id": "srv:1", "name": "tootie", "comment": None, "status": "ONLINE"}} + tool_fn = _make_tool() + await tool_fn(action="update_server", server_name="tootie", sys_model="Custom Server") + call_args = _mock_graphql.call_args + assert call_args[0][1]["sysModel"] == "Custom Server" + + async def test_update_ssh_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "updateSshSettings": {"id": "vars:1", "useSsh": True, "portssh": 22} + } + tool_fn = _make_tool() + result = await tool_fn(action="update_ssh", ssh_enabled=True, ssh_port=22) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["enabled"] is True + assert call_args[0][1]["input"]["port"] == 22 + + async def test_update_ssh_requires_enabled(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="ssh_enabled"): + await tool_fn(action="update_ssh", ssh_port=22) + + async def test_update_ssh_requires_port(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="ssh_port"): + await tool_fn(action="update_ssh", ssh_enabled=True) +``` + +- [ ] **Step 11: Run tests to confirm they fail** + +```bash +uv run pytest tests/test_info.py::TestInfoMutations -v 2>&1 | tail -15 +``` + +Expected: All FAIL. + +--- + +### Task 6: Implement update_server and update_ssh in info.py (GREEN) + +**Files:** +- Modify: `unraid_mcp/tools/info.py` + +- [ ] **Step 12: Add MUTATIONS dict** (insert after the QUERIES dict closing `}`) + +```python +MUTATIONS: dict[str, str] = { + "update_server": """ + mutation UpdateServerIdentity($name: String!, $comment: String, $sysModel: String) { + updateServerIdentity(name: $name, comment: $comment, sysModel: $sysModel) { + id name comment status + } + } + """, + "update_ssh": """ + mutation UpdateSshSettings($input: UpdateSshInput!) { + updateSshSettings(input: $input) { id useSsh portssh } + } + """, +} +``` + +- [ ] **Step 13: Update ALL_ACTIONS, INFO_ACTIONS Literal, and sync guard** + +Replace: +```python +ALL_ACTIONS = set(QUERIES) +``` +With: +```python +ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) +``` + +Add `"update_server"` and `"update_ssh"` to the `INFO_ACTIONS` Literal (append before the closing bracket): +```python + "update_server", + "update_ssh", +``` + +Update the sync guard error message: +```python +if set(get_args(INFO_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(INFO_ACTIONS)) + _extra = set(get_args(INFO_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"INFO_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) +``` + +- [ ] **Step 14: Add params to the tool function signature** + +Add after `device_id`: +```python + server_name: str | None = None, + server_comment: str | None = None, + sys_model: str | None = None, + ssh_enabled: bool | None = None, + ssh_port: int | None = None, +``` + +- [ ] **Step 15: Add handler branches** (insert before the final `raise ToolError(...)`) + +```python + if action == "update_server": + if server_name is None: + raise ToolError("server_name is required for 'update_server' action") + variables = {"name": server_name} + if server_comment is not None: + variables["comment"] = server_comment + if sys_model is not None: + variables["sysModel"] = sys_model + data = await make_graphql_request(MUTATIONS["update_server"], variables) + return {"success": True, "action": "update_server", "data": data.get("updateServerIdentity")} + + if action == "update_ssh": + if ssh_enabled is None: + raise ToolError("ssh_enabled is required for 'update_ssh' action") + if ssh_port is None: + raise ToolError("ssh_port is required for 'update_ssh' action") + data = await make_graphql_request( + MUTATIONS["update_ssh"], + {"input": {"enabled": ssh_enabled, "port": ssh_port}}, + ) + return {"success": True, "action": "update_ssh", "data": data.get("updateSshSettings")} +``` + +Update docstring to add: +``` + update_server - Update server name, comment, and model (requires server_name) + update_ssh - Enable/disable SSH and set port (requires ssh_enabled, ssh_port) +``` + +- [ ] **Step 16: Run all info tests** + +```bash +uv run pytest tests/test_info.py -v 2>&1 | tail -20 +``` + +Expected: All PASS. + +- [ ] **Step 17: Lint** + +```bash +uv run ruff check unraid_mcp/tools/info.py && uv run ruff format --check unraid_mcp/tools/info.py +``` + +- [ ] **Step 18: Commit** + +```bash +git add unraid_mcp/tools/info.py tests/test_info.py +git commit -m "feat: add update_server and update_ssh mutations to info tool" +``` + +--- + +## Chunk 3: Docker — 11 organizer mutations + +### Task 7: Test the 11 Docker organizer mutations (RED) + +**Files:** +- Modify: `tests/test_docker.py` + +- [ ] **Step 1: Append organizer test class to `tests/test_docker.py`** + +```python +# Helper fixture for organizer response +_ORGANIZER_RESPONSE = { + "version": 1.0, + "views": [{"id": "default", "name": "Default", "rootId": "root", "flatEntries": []}], +} + + +class TestDockerOrganizerMutations: + async def test_create_folder_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"createDockerFolder": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="create_folder", folder_name="Media Apps") + assert result["success"] is True + + async def test_create_folder_requires_name(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="folder_name"): + await tool_fn(action="create_folder") + + async def test_create_folder_passes_children(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"createDockerFolder": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + await tool_fn(action="create_folder", folder_name="Media", children_ids=["c1", "c2"]) + call_args = _mock_graphql.call_args + assert call_args[0][1]["childrenIds"] == ["c1", "c2"] + + async def test_set_folder_children_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"setDockerFolderChildren": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="set_folder_children", children_ids=["c1"]) + assert result["success"] is True + + async def test_set_folder_children_requires_children(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="children_ids"): + await tool_fn(action="set_folder_children") + + async def test_delete_entries_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action="delete_entries", entry_ids=["e1"]) + + async def test_delete_entries_requires_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="entry_ids"): + await tool_fn(action="delete_entries", confirm=True) + + async def test_delete_entries_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"deleteDockerEntries": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="delete_entries", entry_ids=["e1", "e2"], confirm=True) + assert result["success"] is True + + async def test_move_to_folder_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"moveDockerEntriesToFolder": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn( + action="move_to_folder", + source_entry_ids=["e1"], + destination_folder_id="folder-1", + ) + assert result["success"] is True + + async def test_move_to_folder_requires_source_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="source_entry_ids"): + await tool_fn(action="move_to_folder", destination_folder_id="f1") + + async def test_move_to_folder_requires_destination(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destination_folder_id"): + await tool_fn(action="move_to_folder", source_entry_ids=["e1"]) + + async def test_move_to_position_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"moveDockerItemsToPosition": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn( + action="move_to_position", + source_entry_ids=["e1"], + destination_folder_id="folder-1", + position=2.0, + ) + assert result["success"] is True + + async def test_move_to_position_requires_position(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="position"): + await tool_fn( + action="move_to_position", + source_entry_ids=["e1"], + destination_folder_id="f1", + ) + + async def test_rename_folder_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"renameDockerFolder": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="rename_folder", folder_id="f1", new_folder_name="New Name") + assert result["success"] is True + + async def test_rename_folder_requires_folder_id(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="folder_id"): + await tool_fn(action="rename_folder", new_folder_name="New") + + async def test_rename_folder_requires_new_name(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="new_folder_name"): + await tool_fn(action="rename_folder", folder_id="f1") + + async def test_create_folder_with_items_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"createDockerFolderWithItems": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="create_folder_with_items", folder_name="New Folder") + assert result["success"] is True + + async def test_create_folder_with_items_requires_name(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="folder_name"): + await tool_fn(action="create_folder_with_items") + + async def test_update_view_prefs_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"updateDockerViewPreferences": _ORGANIZER_RESPONSE} + tool_fn = _make_tool() + result = await tool_fn(action="update_view_prefs", view_prefs={"sort": "name"}) + assert result["success"] is True + + async def test_update_view_prefs_requires_prefs(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="view_prefs"): + await tool_fn(action="update_view_prefs") + + async def test_sync_templates_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "syncDockerTemplatePaths": {"scanned": 10, "matched": 8, "skipped": 2, "errors": []} + } + tool_fn = _make_tool() + result = await tool_fn(action="sync_templates") + assert result["success"] is True + + async def test_reset_template_mappings_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action="reset_template_mappings") + + async def test_reset_template_mappings_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"resetDockerTemplateMappings": True} + tool_fn = _make_tool() + result = await tool_fn(action="reset_template_mappings", confirm=True) + assert result["success"] is True + + async def test_refresh_digests_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"refreshDockerDigests": True} + tool_fn = _make_tool() + result = await tool_fn(action="refresh_digests") + assert result["success"] is True +``` + +- [ ] **Step 2: Run tests to confirm they fail** + +```bash +uv run pytest tests/test_docker.py::TestDockerOrganizerMutations -v 2>&1 | tail -20 +``` + +Expected: All FAIL. + +--- + +### Task 8: Implement 11 organizer mutations in docker.py (GREEN) + +**Files:** +- Modify: `unraid_mcp/tools/docker.py` + +- [ ] **Step 3: Add 11 entries to the MUTATIONS dict** (append inside the existing MUTATIONS dict before its closing `}`) + +```python + "create_folder": """ + mutation CreateDockerFolder($name: String!, $parentId: String, $childrenIds: [String!]) { + createDockerFolder(name: $name, parentId: $parentId, childrenIds: $childrenIds) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "set_folder_children": """ + mutation SetDockerFolderChildren($folderId: String, $childrenIds: [String!]!) { + setDockerFolderChildren(folderId: $folderId, childrenIds: $childrenIds) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "delete_entries": """ + mutation DeleteDockerEntries($entryIds: [String!]!) { + deleteDockerEntries(entryIds: $entryIds) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "move_to_folder": """ + mutation MoveDockerEntriesToFolder($sourceEntryIds: [String!]!, $destinationFolderId: String!) { + moveDockerEntriesToFolder(sourceEntryIds: $sourceEntryIds, destinationFolderId: $destinationFolderId) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "move_to_position": """ + mutation MoveDockerItemsToPosition($sourceEntryIds: [String!]!, $destinationFolderId: String!, $position: Float!) { + moveDockerItemsToPosition(sourceEntryIds: $sourceEntryIds, destinationFolderId: $destinationFolderId, position: $position) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "rename_folder": """ + mutation RenameDockerFolder($folderId: String!, $newName: String!) { + renameDockerFolder(folderId: $folderId, newName: $newName) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "create_folder_with_items": """ + mutation CreateDockerFolderWithItems($name: String!, $parentId: String, $sourceEntryIds: [String!], $position: Float) { + createDockerFolderWithItems(name: $name, parentId: $parentId, sourceEntryIds: $sourceEntryIds, position: $position) { + version views { id name rootId flatEntries { id type name parentId depth position path hasChildren childrenIds } } + } + } + """, + "update_view_prefs": """ + mutation UpdateDockerViewPreferences($viewId: String, $prefs: JSON!) { + updateDockerViewPreferences(viewId: $viewId, prefs: $prefs) { + version views { id name rootId } + } + } + """, + "sync_templates": """ + mutation SyncDockerTemplatePaths { + syncDockerTemplatePaths { scanned matched skipped errors } + } + """, + "reset_template_mappings": """ + mutation ResetDockerTemplateMappings { + resetDockerTemplateMappings + } + """, + "refresh_digests": """ + mutation RefreshDockerDigests { + refreshDockerDigests + } + """, +``` + +- [ ] **Step 4: Update DESTRUCTIVE_ACTIONS** + +Replace: +```python +DESTRUCTIVE_ACTIONS = {"remove", "update_all"} +``` +With: +```python +DESTRUCTIVE_ACTIONS = {"remove", "update_all", "delete_entries", "reset_template_mappings"} +``` + +- [ ] **Step 5: Update DOCKER_ACTIONS Literal** + +Append 11 new strings to the Literal (before the closing `]`): +```python + "create_folder", + "set_folder_children", + "delete_entries", + "move_to_folder", + "move_to_position", + "rename_folder", + "create_folder_with_items", + "update_view_prefs", + "sync_templates", + "reset_template_mappings", + "refresh_digests", +``` + +- [ ] **Step 6: Add new params to the tool function signature** + +Add after `tail_lines`: +```python + folder_name: str | None = None, + folder_id: str | None = None, + parent_id: str | None = None, + children_ids: list[str] | None = None, + entry_ids: list[str] | None = None, + source_entry_ids: list[str] | None = None, + destination_folder_id: str | None = None, + position: float | None = None, + new_folder_name: str | None = None, + view_id: str = "default", + view_prefs: dict[str, Any] | None = None, +``` + +- [ ] **Step 7: Add handler branches** (insert before the final `raise ToolError(...)`) + +```python + # --- Docker organizer mutations --- + if action == "create_folder": + if not folder_name: + raise ToolError("folder_name is required for 'create_folder' action") + vars_: dict[str, Any] = {"name": folder_name} + if parent_id is not None: + vars_["parentId"] = parent_id + if children_ids is not None: + vars_["childrenIds"] = children_ids + data = await make_graphql_request(MUTATIONS["create_folder"], vars_) + return {"success": True, "action": "create_folder", "organizer": data.get("createDockerFolder")} + + if action == "set_folder_children": + if not children_ids: + raise ToolError("children_ids is required for 'set_folder_children' action") + vars_ = {"childrenIds": children_ids} + if folder_id is not None: + vars_["folderId"] = folder_id + data = await make_graphql_request(MUTATIONS["set_folder_children"], vars_) + return {"success": True, "action": "set_folder_children", "organizer": data.get("setDockerFolderChildren")} + + if action == "delete_entries": + if not entry_ids: + raise ToolError("entry_ids is required for 'delete_entries' action") + data = await make_graphql_request(MUTATIONS["delete_entries"], {"entryIds": entry_ids}) + return {"success": True, "action": "delete_entries", "organizer": data.get("deleteDockerEntries")} + + if action == "move_to_folder": + if not source_entry_ids: + raise ToolError("source_entry_ids is required for 'move_to_folder' action") + if not destination_folder_id: + raise ToolError("destination_folder_id is required for 'move_to_folder' action") + data = await make_graphql_request( + MUTATIONS["move_to_folder"], + {"sourceEntryIds": source_entry_ids, "destinationFolderId": destination_folder_id}, + ) + return {"success": True, "action": "move_to_folder", "organizer": data.get("moveDockerEntriesToFolder")} + + if action == "move_to_position": + if not source_entry_ids: + raise ToolError("source_entry_ids is required for 'move_to_position' action") + if not destination_folder_id: + raise ToolError("destination_folder_id is required for 'move_to_position' action") + if position is None: + raise ToolError("position is required for 'move_to_position' action") + data = await make_graphql_request( + MUTATIONS["move_to_position"], + { + "sourceEntryIds": source_entry_ids, + "destinationFolderId": destination_folder_id, + "position": position, + }, + ) + return {"success": True, "action": "move_to_position", "organizer": data.get("moveDockerItemsToPosition")} + + if action == "rename_folder": + if not folder_id: + raise ToolError("folder_id is required for 'rename_folder' action") + if not new_folder_name: + raise ToolError("new_folder_name is required for 'rename_folder' action") + data = await make_graphql_request( + MUTATIONS["rename_folder"], {"folderId": folder_id, "newName": new_folder_name} + ) + return {"success": True, "action": "rename_folder", "organizer": data.get("renameDockerFolder")} + + if action == "create_folder_with_items": + if not folder_name: + raise ToolError("folder_name is required for 'create_folder_with_items' action") + vars_ = {"name": folder_name} + if parent_id is not None: + vars_["parentId"] = parent_id + if entry_ids is not None: + vars_["sourceEntryIds"] = entry_ids + if position is not None: + vars_["position"] = position + data = await make_graphql_request(MUTATIONS["create_folder_with_items"], vars_) + return {"success": True, "action": "create_folder_with_items", "organizer": data.get("createDockerFolderWithItems")} + + if action == "update_view_prefs": + if view_prefs is None: + raise ToolError("view_prefs is required for 'update_view_prefs' action") + data = await make_graphql_request( + MUTATIONS["update_view_prefs"], {"viewId": view_id, "prefs": view_prefs} + ) + return {"success": True, "action": "update_view_prefs", "organizer": data.get("updateDockerViewPreferences")} + + if action == "sync_templates": + data = await make_graphql_request(MUTATIONS["sync_templates"]) + return {"success": True, "action": "sync_templates", "result": data.get("syncDockerTemplatePaths")} + + if action == "reset_template_mappings": + data = await make_graphql_request(MUTATIONS["reset_template_mappings"]) + return {"success": True, "action": "reset_template_mappings", "result": data.get("resetDockerTemplateMappings")} + + if action == "refresh_digests": + data = await make_graphql_request(MUTATIONS["refresh_digests"]) + return {"success": True, "action": "refresh_digests", "result": data.get("refreshDockerDigests")} +``` + +Update docstring to add the 11 new actions. + +- [ ] **Step 8: Run all docker tests** + +```bash +uv run pytest tests/test_docker.py -v 2>&1 | tail -30 +``` + +Expected: All PASS. + +- [ ] **Step 9: Lint** + +```bash +uv run ruff check unraid_mcp/tools/docker.py && uv run ruff format --check unraid_mcp/tools/docker.py +``` + +- [ ] **Step 10: Commit** + +```bash +git add unraid_mcp/tools/docker.py tests/test_docker.py +git commit -m "feat: add 11 Docker organizer mutations (folders, positions, templates, digests)" +``` + +--- + +## Chunk 4: New settings tool — 9 mutations + +### Task 9: Create test_settings.py (RED) + +**Files:** +- Create: `tests/test_settings.py` + +- [ ] **Step 1: Create `tests/test_settings.py`** + +```python +"""Tests for unraid_settings tool.""" + +from collections.abc import Generator +from unittest.mock import AsyncMock, patch + +import pytest +from conftest import make_tool_fn + +from unraid_mcp.core.exceptions import ToolError + + +@pytest.fixture +def _mock_graphql() -> Generator[AsyncMock, None, None]: + with patch( + "unraid_mcp.tools.settings.make_graphql_request", new_callable=AsyncMock + ) as mock: + yield mock + + +def _make_tool(): + return make_tool_fn( + "unraid_mcp.tools.settings", "register_settings_tool", "unraid_settings" + ) + + +class TestSettingsValidation: + async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="Invalid action"): + await tool_fn(action="nonexistent") # type: ignore[arg-type] + + async def test_configure_ups_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action="configure_ups", ups_config={"service": "ENABLE"}) + + async def test_setup_remote_access_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action="setup_remote_access", access_type="ALWAYS") + + async def test_enable_dynamic_remote_access_requires_confirm( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="destructive"): + await tool_fn( + action="enable_dynamic_remote_access", + access_url_type="LAN", + dynamic_enabled=True, + ) + + +class TestSettingsUpdate: + async def test_update_requires_settings_input(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="settings_input"): + await tool_fn(action="update") + + async def test_update_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "updateSettings": {"restartRequired": False, "values": {"key": "val"}, "warnings": []} + } + tool_fn = _make_tool() + result = await tool_fn(action="update", settings_input={"shareSmbEnabled": True}) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"] == {"shareSmbEnabled": True} + + +class TestTemperatureConfig: + async def test_update_temperature_requires_config(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="temperature_config"): + await tool_fn(action="update_temperature") + + async def test_update_temperature_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"updateTemperatureConfig": True} + tool_fn = _make_tool() + result = await tool_fn( + action="update_temperature", + temperature_config={"enabled": True, "polling_interval": 30}, + ) + assert result["success"] is True + + +class TestSystemTime: + async def test_update_time_requires_at_least_one_field( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="at least one"): + await tool_fn(action="update_time") + + async def test_update_time_timezone_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "updateSystemTime": { + "currentTime": "2026-03-13T12:00:00Z", + "timeZone": "America/New_York", + "useNtp": True, + "ntpServers": ["pool.ntp.org"], + } + } + tool_fn = _make_tool() + result = await tool_fn(action="update_time", time_zone="America/New_York") + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["timeZone"] == "America/New_York" + + async def test_update_time_ntp_servers(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"updateSystemTime": {"currentTime": "2026-03-13T12:00:00Z", "timeZone": "UTC", "useNtp": True, "ntpServers": ["0.pool.ntp.org"]}} + tool_fn = _make_tool() + await tool_fn(action="update_time", use_ntp=True, ntp_servers=["0.pool.ntp.org"]) + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["useNtp"] is True + assert call_args[0][1]["input"]["ntpServers"] == ["0.pool.ntp.org"] + + +class TestUpsConfig: + async def test_configure_ups_requires_config(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="ups_config"): + await tool_fn(action="configure_ups", confirm=True) + + async def test_configure_ups_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"configureUps": True} + tool_fn = _make_tool() + result = await tool_fn( + action="configure_ups", + confirm=True, + ups_config={"service": "ENABLE", "upsCable": "USB"}, + ) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["config"]["service"] == "ENABLE" + + +class TestApiSettings: + async def test_update_api_requires_at_least_one_field( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="at least one"): + await tool_fn(action="update_api") + + async def test_update_api_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "updateApiSettings": {"accessType": "ALWAYS", "forwardType": None, "port": None} + } + tool_fn = _make_tool() + result = await tool_fn(action="update_api", access_type="ALWAYS") + assert result["success"] is True + + +class TestConnectActions: + async def test_connect_sign_in_requires_api_key(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="api_key"): + await tool_fn(action="connect_sign_in") + + async def test_connect_sign_in_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"connectSignIn": True} + tool_fn = _make_tool() + result = await tool_fn( + action="connect_sign_in", + api_key="secret-key", + username="jmagar", + email="jmagar@gmail.com", + ) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["apiKey"] == "secret-key" + assert call_args[0][1]["input"]["userInfo"]["preferred_username"] == "jmagar" + + async def test_connect_sign_out_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"connectSignOut": True} + tool_fn = _make_tool() + result = await tool_fn(action="connect_sign_out") + assert result["success"] is True + + +class TestRemoteAccess: + async def test_setup_remote_access_requires_access_type( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="access_type"): + await tool_fn(action="setup_remote_access", confirm=True) + + async def test_setup_remote_access_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"setupRemoteAccess": True} + tool_fn = _make_tool() + result = await tool_fn( + action="setup_remote_access", confirm=True, access_type="ALWAYS" + ) + assert result["success"] is True + + async def test_enable_dynamic_remote_access_requires_type_and_enabled( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="access_url_type"): + await tool_fn(action="enable_dynamic_remote_access", confirm=True) + + async def test_enable_dynamic_remote_access_requires_enabled_field( + self, _mock_graphql: AsyncMock + ) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="dynamic_enabled"): + await tool_fn( + action="enable_dynamic_remote_access", confirm=True, access_url_type="LAN" + ) + + async def test_enable_dynamic_remote_access_success( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"enableDynamicRemoteAccess": True} + tool_fn = _make_tool() + result = await tool_fn( + action="enable_dynamic_remote_access", + confirm=True, + access_url_type="LAN", + access_url_ipv4="10.1.0.2", + dynamic_enabled=True, + ) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["enabled"] is True + assert call_args[0][1]["input"]["url"]["type"] == "LAN" +``` + +- [ ] **Step 2: Run tests to confirm they fail (import error expected)** + +```bash +uv run pytest tests/test_settings.py -v 2>&1 | tail -10 +``` + +Expected: `ModuleNotFoundError` or `ImportError` — the file doesn't exist yet. + +--- + +### Task 10: Create tools/settings.py (GREEN) + +**Files:** +- Create: `unraid_mcp/tools/settings.py` + +- [ ] **Step 3: Create `unraid_mcp/tools/settings.py`** + +```python +"""System settings, time, UPS, and remote access mutations. + +Provides the `unraid_settings` tool with 9 actions for updating system +configuration, time settings, UPS, API settings, and Unraid Connect. +""" + +from typing import Any, Literal, get_args + +from fastmcp import FastMCP + +from ..config.logging import logger +from ..core.client import make_graphql_request +from ..core.exceptions import ToolError, tool_error_handler + + +MUTATIONS: dict[str, str] = { + "update": """ + mutation UpdateSettings($input: JSON!) { + updateSettings(input: $input) { restartRequired values warnings } + } + """, + "update_temperature": """ + mutation UpdateTemperatureConfig($input: TemperatureConfigInput!) { + updateTemperatureConfig(input: $input) + } + """, + "update_time": """ + mutation UpdateSystemTime($input: UpdateSystemTimeInput!) { + updateSystemTime(input: $input) { currentTime timeZone useNtp ntpServers } + } + """, + "configure_ups": """ + mutation ConfigureUps($config: UPSConfigInput!) { + configureUps(config: $config) + } + """, + "update_api": """ + mutation UpdateApiSettings($input: ConnectSettingsInput!) { + updateApiSettings(input: $input) { accessType forwardType port } + } + """, + "connect_sign_in": """ + mutation ConnectSignIn($input: ConnectSignInInput!) { + connectSignIn(input: $input) + } + """, + "connect_sign_out": """ + mutation ConnectSignOut { + connectSignOut + } + """, + "setup_remote_access": """ + mutation SetupRemoteAccess($input: SetupRemoteAccessInput!) { + setupRemoteAccess(input: $input) + } + """, + "enable_dynamic_remote_access": """ + mutation EnableDynamicRemoteAccess($input: EnableDynamicRemoteAccessInput!) { + enableDynamicRemoteAccess(input: $input) + } + """, +} + +DESTRUCTIVE_ACTIONS = {"configure_ups", "setup_remote_access", "enable_dynamic_remote_access"} +ALL_ACTIONS = set(MUTATIONS) + +SETTINGS_ACTIONS = Literal[ + "update", + "update_temperature", + "update_time", + "configure_ups", + "update_api", + "connect_sign_in", + "connect_sign_out", + "setup_remote_access", + "enable_dynamic_remote_access", +] + +if set(get_args(SETTINGS_ACTIONS)) != ALL_ACTIONS: + _missing = ALL_ACTIONS - set(get_args(SETTINGS_ACTIONS)) + _extra = set(get_args(SETTINGS_ACTIONS)) - ALL_ACTIONS + raise RuntimeError( + f"SETTINGS_ACTIONS and ALL_ACTIONS are out of sync. " + f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}" + ) + + +def register_settings_tool(mcp: FastMCP) -> None: + """Register the unraid_settings tool with the FastMCP instance.""" + + @mcp.tool() + async def unraid_settings( + action: SETTINGS_ACTIONS, + confirm: bool = False, + # updateSettings + settings_input: dict[str, Any] | None = None, + # updateTemperatureConfig + temperature_config: dict[str, Any] | None = None, + # updateSystemTime + time_zone: str | None = None, + use_ntp: bool | None = None, + ntp_servers: list[str] | None = None, + manual_datetime: str | None = None, + # configureUps + ups_config: dict[str, Any] | None = None, + # updateApiSettings / setupRemoteAccess + access_type: str | None = None, + forward_type: str | None = None, + port: int | None = None, + # connectSignIn + api_key: str | None = None, + username: str | None = None, + email: str | None = None, + avatar: str | None = None, + # enableDynamicRemoteAccess + access_url_type: str | None = None, + access_url_name: str | None = None, + access_url_ipv4: str | None = None, + access_url_ipv6: str | None = None, + dynamic_enabled: bool | None = None, + ) -> dict[str, Any]: + """Update Unraid system settings, time, UPS, and remote access configuration. + + Actions: + update - Update system settings (requires settings_input dict) + update_temperature - Update temperature sensor config (requires temperature_config dict) + update_time - Update system time/timezone/NTP (requires at least one of: time_zone, use_ntp, ntp_servers, manual_datetime) + configure_ups - Configure UPS monitoring (requires ups_config dict, confirm=True) + update_api - Update API/Connect settings (requires at least one of: access_type, forward_type, port) + connect_sign_in - Sign in to Unraid Connect (requires api_key) + connect_sign_out - Sign out from Unraid Connect + setup_remote_access - Configure remote access (requires access_type, confirm=True) + enable_dynamic_remote_access - Enable/disable dynamic remote access (requires access_url_type, dynamic_enabled, confirm=True) + """ + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") + + if action in DESTRUCTIVE_ACTIONS and not confirm: + raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") + + with tool_error_handler("settings", action, logger): + logger.info(f"Executing unraid_settings action={action}") + + if action == "update": + if settings_input is None: + raise ToolError("settings_input is required for 'update' action") + data = await make_graphql_request(MUTATIONS["update"], {"input": settings_input}) + return {"success": True, "action": "update", "data": data.get("updateSettings")} + + if action == "update_temperature": + if temperature_config is None: + raise ToolError("temperature_config is required for 'update_temperature' action") + data = await make_graphql_request( + MUTATIONS["update_temperature"], {"input": temperature_config} + ) + return {"success": True, "action": "update_temperature", "result": data.get("updateTemperatureConfig")} + + if action == "update_time": + time_input: dict[str, Any] = {} + if time_zone is not None: + time_input["timeZone"] = time_zone + if use_ntp is not None: + time_input["useNtp"] = use_ntp + if ntp_servers is not None: + time_input["ntpServers"] = ntp_servers + if manual_datetime is not None: + time_input["manualDateTime"] = manual_datetime + if not time_input: + raise ToolError( + "update_time requires at least one of: time_zone, use_ntp, ntp_servers, manual_datetime" + ) + data = await make_graphql_request(MUTATIONS["update_time"], {"input": time_input}) + return {"success": True, "action": "update_time", "data": data.get("updateSystemTime")} + + if action == "configure_ups": + if ups_config is None: + raise ToolError("ups_config is required for 'configure_ups' action") + data = await make_graphql_request(MUTATIONS["configure_ups"], {"config": ups_config}) + return {"success": True, "action": "configure_ups", "result": data.get("configureUps")} + + if action == "update_api": + api_input: dict[str, Any] = {} + if access_type is not None: + api_input["accessType"] = access_type + if forward_type is not None: + api_input["forwardType"] = forward_type + if port is not None: + api_input["port"] = port + if not api_input: + raise ToolError( + "update_api requires at least one of: access_type, forward_type, port" + ) + data = await make_graphql_request(MUTATIONS["update_api"], {"input": api_input}) + return {"success": True, "action": "update_api", "data": data.get("updateApiSettings")} + + if action == "connect_sign_in": + if not api_key: + raise ToolError("api_key is required for 'connect_sign_in' action") + sign_in_input: dict[str, Any] = {"apiKey": api_key} + if username or email: + user_info: dict[str, Any] = {} + if username: + user_info["preferred_username"] = username + if email: + user_info["email"] = email + if avatar: + user_info["avatar"] = avatar + sign_in_input["userInfo"] = user_info + data = await make_graphql_request( + MUTATIONS["connect_sign_in"], {"input": sign_in_input} + ) + return {"success": True, "action": "connect_sign_in", "result": data.get("connectSignIn")} + + if action == "connect_sign_out": + data = await make_graphql_request(MUTATIONS["connect_sign_out"]) + return {"success": True, "action": "connect_sign_out", "result": data.get("connectSignOut")} + + if action == "setup_remote_access": + if not access_type: + raise ToolError("access_type is required for 'setup_remote_access' action") + remote_input: dict[str, Any] = {"accessType": access_type} + if forward_type is not None: + remote_input["forwardType"] = forward_type + if port is not None: + remote_input["port"] = port + data = await make_graphql_request( + MUTATIONS["setup_remote_access"], {"input": remote_input} + ) + return {"success": True, "action": "setup_remote_access", "result": data.get("setupRemoteAccess")} + + if action == "enable_dynamic_remote_access": + if not access_url_type: + raise ToolError("access_url_type is required for 'enable_dynamic_remote_access' action") + if dynamic_enabled is None: + raise ToolError("dynamic_enabled is required for 'enable_dynamic_remote_access' action") + url_input: dict[str, Any] = {"type": access_url_type} + if access_url_name is not None: + url_input["name"] = access_url_name + if access_url_ipv4 is not None: + url_input["ipv4"] = access_url_ipv4 + if access_url_ipv6 is not None: + url_input["ipv6"] = access_url_ipv6 + data = await make_graphql_request( + MUTATIONS["enable_dynamic_remote_access"], + {"input": {"url": url_input, "enabled": dynamic_enabled}}, + ) + return {"success": True, "action": "enable_dynamic_remote_access", "result": data.get("enableDynamicRemoteAccess")} + + raise ToolError(f"Unhandled action '{action}' — this is a bug") + + logger.info("Settings tool registered successfully") +``` + +- [ ] **Step 4: Run settings tests** + +```bash +uv run pytest tests/test_settings.py -v 2>&1 | tail -30 +``` + +Expected: All PASS. + +- [ ] **Step 5: Lint** + +```bash +uv run ruff check unraid_mcp/tools/settings.py && uv run ruff format --check unraid_mcp/tools/settings.py +``` + +- [ ] **Step 6: Commit** + +```bash +git add unraid_mcp/tools/settings.py tests/test_settings.py +git commit -m "feat: add new unraid_settings tool with 9 mutations (settings, time, UPS, connect, remote access)" +``` + +--- + +### Task 11: Register settings tool in server.py + +**Files:** +- Modify: `unraid_mcp/server.py` + +- [ ] **Step 7: Add import** (after the existing `register_users_tool` import line) + +```python +from .tools.settings import register_settings_tool +``` + +- [ ] **Step 8: Add to registrars list** (append inside the `registrars` list) + +```python + register_settings_tool, +``` + +- [ ] **Step 9: Verify server still starts cleanly** + +```bash +uv run python -c "from unraid_mcp.server import register_all_modules, mcp; register_all_modules(); print('OK')" 2>&1 +``` + +Expected: `OK` with no errors (note: this will fail on missing env vars — that's fine, we're only checking imports and registration). + +Actually use: +```bash +uv run python -c " +import os; os.environ.setdefault('UNRAID_API_URL','http://fake'); os.environ.setdefault('UNRAID_API_KEY','fake') +from unraid_mcp.server import register_all_modules, mcp +register_all_modules() +tools = list(mcp._tool_manager._tools.keys()) +print(f'Registered {len(tools)} tools: {tools}') +" +``` + +Expected: Output includes `unraid_settings` in the tools list. + +- [ ] **Step 10: Run full test suite to confirm nothing broken** + +```bash +uv run pytest --tb=short -q 2>&1 | tail -15 +``` + +Expected: All tests PASS (598 original + ~63 new = ~661 total). + +- [ ] **Step 11: Lint all modified files** + +```bash +uv run ruff check unraid_mcp/ && uv run ruff format --check unraid_mcp/ +``` + +- [ ] **Step 12: Commit** + +```bash +git add unraid_mcp/server.py +git commit -m "feat: register unraid_settings tool in server — 11 tools, 104 actions total" +``` + +--- + +## Final Verification + +- [ ] **Count total actions** + +```bash +uv run python -c " +import os; os.environ.setdefault('UNRAID_API_URL','http://fake'); os.environ.setdefault('UNRAID_API_KEY','fake') +from unraid_mcp.tools.notifications import ALL_ACTIONS as n +from unraid_mcp.tools.storage import ALL_ACTIONS as s +from unraid_mcp.tools.info import ALL_ACTIONS as i +from unraid_mcp.tools.docker import ALL_ACTIONS as d +from unraid_mcp.tools.virtualization import ALL_ACTIONS as v +from unraid_mcp.tools.array import ALL_ACTIONS as a +from unraid_mcp.tools.rclone import ALL_ACTIONS as r +from unraid_mcp.tools.users import ALL_ACTIONS as u +from unraid_mcp.tools.keys import ALL_ACTIONS as k +from unraid_mcp.tools.health import HEALTH_ACTIONS +from unraid_mcp.tools.settings import ALL_ACTIONS as st +from typing import get_args +total = len(n)+len(s)+len(i)+len(d)+len(v)+len(a)+len(r)+len(u)+len(k)+len(get_args(HEALTH_ACTIONS))+len(st) +print(f'Total actions: {total}') +" +``` + +Expected output: `Total actions: 104` + +- [ ] **Run full test suite one final time** + +```bash +uv run pytest -v --tb=short 2>&1 | tail -20 +``` + +Expected: All tests PASS. + +- [ ] **Update MEMORY.md counts** + +Update `/home/jmagar/.claude/projects/-home-jmagar-workspace-unraid-mcp/memory/MEMORY.md`: +- Change `10 tools, 76 actions` → `11 tools, 104 actions` +- Update test count to reflect new total +- Add `unraid_settings` row to the Tool Reference table diff --git a/pyproject.toml b/pyproject.toml index ad129aa..27835b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ build-backend = "hatchling.build" # ============================================================================ [project] name = "unraid-mcp" -version = "0.2.1" +version = "0.3.0" description = "MCP Server for Unraid API - provides tools to interact with an Unraid server's GraphQL API" readme = "README.md" license = {file = "LICENSE"} diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index c183788..5e3efa8 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -228,9 +228,7 @@ class TestGraphQLErrorHandling: @respx.mock async def test_idempotent_start_error_returns_success(self) -> None: respx.post(API_URL).mock( - return_value=_graphql_response( - errors=[{"message": "Container already running"}] - ) + return_value=_graphql_response(errors=[{"message": "Container already running"}]) ) result = await make_graphql_request( 'mutation { docker { start(id: "x") } }', @@ -242,9 +240,7 @@ class TestGraphQLErrorHandling: @respx.mock async def test_idempotent_stop_error_returns_success(self) -> None: respx.post(API_URL).mock( - return_value=_graphql_response( - errors=[{"message": "Container not running"}] - ) + return_value=_graphql_response(errors=[{"message": "Container not running"}]) ) result = await make_graphql_request( 'mutation { docker { stop(id: "x") } }', @@ -275,7 +271,13 @@ class TestInfoToolRequests: async def test_overview_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"info": {"os": {"platform": "linux", "hostname": "tower"}, "cpu": {}, "memory": {}}} + { + "info": { + "os": {"platform": "linux", "hostname": "tower"}, + "cpu": {}, + "memory": {}, + } + } ) ) tool = self._get_tool() @@ -329,9 +331,7 @@ class TestInfoToolRequests: @respx.mock async def test_online_sends_correct_query(self) -> None: - route = respx.post(API_URL).mock( - return_value=_graphql_response({"online": True}) - ) + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() await tool(action="online") body = _extract_request_body(route.calls.last.request) @@ -374,9 +374,7 @@ class TestDockerToolRequests: async def test_list_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"docker": {"containers": [ - {"id": "c1", "names": ["plex"], "state": "running"} - ]}} + {"docker": {"containers": [{"id": "c1", "names": ["plex"], "state": "running"}]}} ) ) tool = self._get_tool() @@ -389,10 +387,16 @@ class TestDockerToolRequests: container_id = "a" * 64 route = respx.post(API_URL).mock( return_value=_graphql_response( - {"docker": {"start": { - "id": container_id, "names": ["plex"], - "state": "running", "status": "Up", - }}} + { + "docker": { + "start": { + "id": container_id, + "names": ["plex"], + "state": "running", + "status": "Up", + } + } + } ) ) tool = self._get_tool() @@ -406,10 +410,16 @@ class TestDockerToolRequests: container_id = "b" * 64 route = respx.post(API_URL).mock( return_value=_graphql_response( - {"docker": {"stop": { - "id": container_id, "names": ["sonarr"], - "state": "exited", "status": "Exited", - }}} + { + "docker": { + "stop": { + "id": container_id, + "names": ["sonarr"], + "state": "exited", + "status": "Exited", + } + } + } ) ) tool = self._get_tool() @@ -451,9 +461,11 @@ class TestDockerToolRequests: async def test_networks_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"dockerNetworks": [ - {"id": "n1", "name": "bridge", "driver": "bridge", "scope": "local"} - ]} + { + "dockerNetworks": [ + {"id": "n1", "name": "bridge", "driver": "bridge", "scope": "local"} + ] + } ) ) tool = self._get_tool() @@ -464,9 +476,7 @@ class TestDockerToolRequests: @respx.mock async def test_check_updates_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response( - {"docker": {"containerUpdateStatuses": []}} - ) + return_value=_graphql_response({"docker": {"containerUpdateStatuses": []}}) ) tool = self._get_tool() await tool(action="check_updates") @@ -485,17 +495,29 @@ class TestDockerToolRequests: call_count += 1 if "StopContainer" in body["query"]: return _graphql_response( - {"docker": {"stop": { - "id": container_id, "names": ["app"], - "state": "exited", "status": "Exited", - }}} + { + "docker": { + "stop": { + "id": container_id, + "names": ["app"], + "state": "exited", + "status": "Exited", + } + } + } ) if "StartContainer" in body["query"]: return _graphql_response( - {"docker": {"start": { - "id": container_id, "names": ["app"], - "state": "running", "status": "Up", - }}} + { + "docker": { + "start": { + "id": container_id, + "names": ["app"], + "state": "running", + "status": "Up", + } + } + } ) return _graphql_response({"docker": {"containers": []}}) @@ -522,10 +544,16 @@ class TestDockerToolRequests: ) if "StartContainer" in body["query"]: return _graphql_response( - {"docker": {"start": { - "id": resolved_id, "names": ["plex"], - "state": "running", "status": "Up", - }}} + { + "docker": { + "start": { + "id": resolved_id, + "names": ["plex"], + "state": "running", + "status": "Up", + } + } + } ) return _graphql_response({}) @@ -546,17 +574,17 @@ class TestVMToolRequests: @staticmethod def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm" - ) + return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm") @respx.mock async def test_list_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"vms": {"domains": [ - {"id": "v1", "name": "win10", "state": "running", "uuid": "u1"} - ]}} + { + "vms": { + "domains": [{"id": "v1", "name": "win10", "state": "running", "uuid": "u1"}] + } + } ) ) tool = self._get_tool() @@ -567,9 +595,7 @@ class TestVMToolRequests: @respx.mock async def test_start_sends_mutation_with_id(self) -> None: - route = respx.post(API_URL).mock( - return_value=_graphql_response({"vm": {"start": True}}) - ) + route = respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"start": True}})) tool = self._get_tool() result = await tool(action="start", vm_id="vm-123") body = _extract_request_body(route.calls.last.request) @@ -579,9 +605,7 @@ class TestVMToolRequests: @respx.mock async def test_stop_sends_mutation_with_id(self) -> None: - route = respx.post(API_URL).mock( - return_value=_graphql_response({"vm": {"stop": True}}) - ) + route = respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"stop": True}})) tool = self._get_tool() await tool(action="stop", vm_id="vm-456") body = _extract_request_body(route.calls.last.request) @@ -615,10 +639,14 @@ class TestVMToolRequests: async def test_details_finds_vm_by_name(self) -> None: respx.post(API_URL).mock( return_value=_graphql_response( - {"vms": {"domains": [ - {"id": "v1", "name": "win10", "state": "running", "uuid": "uuid-1"}, - {"id": "v2", "name": "ubuntu", "state": "stopped", "uuid": "uuid-2"}, - ]}} + { + "vms": { + "domains": [ + {"id": "v1", "name": "win10", "state": "running", "uuid": "uuid-1"}, + {"id": "v2", "name": "ubuntu", "state": "stopped", "uuid": "uuid-2"}, + ] + } + } ) ) tool = self._get_tool() @@ -642,9 +670,15 @@ class TestArrayToolRequests: async def test_parity_status_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"array": {"parityCheckStatus": { - "progress": 50, "speed": "100 MB/s", "errors": 0, - }}} + { + "array": { + "parityCheckStatus": { + "progress": 50, + "speed": "100 MB/s", + "errors": 0, + } + } + } ) ) tool = self._get_tool() @@ -706,9 +740,7 @@ class TestStorageToolRequests: @staticmethod def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage" - ) + return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage") @respx.mock async def test_shares_sends_correct_query(self) -> None: @@ -737,10 +769,16 @@ class TestStorageToolRequests: async def test_disk_details_sends_variable(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"disk": { - "id": "d1", "device": "sda", "name": "Disk 1", - "serialNum": "SN123", "size": 1000000, "temperature": 35, - }} + { + "disk": { + "id": "d1", + "device": "sda", + "name": "Disk 1", + "serialNum": "SN123", + "size": 1000000, + "temperature": 35, + } + } ) ) tool = self._get_tool() @@ -766,10 +804,14 @@ class TestStorageToolRequests: async def test_logs_sends_path_and_lines_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"logFile": { - "path": "/var/log/syslog", "content": "log line", - "totalLines": 100, "startLine": 1, - }} + { + "logFile": { + "path": "/var/log/syslog", + "content": "log line", + "totalLines": 100, + "startLine": 1, + } + } ) ) tool = self._get_tool() @@ -787,9 +829,7 @@ class TestStorageToolRequests: @respx.mock async def test_unassigned_sends_correct_query(self) -> None: - route = respx.post(API_URL).mock( - return_value=_graphql_response({"unassignedDevices": []}) - ) + route = respx.post(API_URL).mock(return_value=_graphql_response({"unassignedDevices": []})) tool = self._get_tool() result = await tool(action="unassigned") body = _extract_request_body(route.calls.last.request) @@ -817,9 +857,13 @@ class TestNotificationsToolRequests: async def test_overview_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"notifications": {"overview": { - "unread": {"info": 1, "warning": 0, "alert": 0, "total": 1}, - }}} + { + "notifications": { + "overview": { + "unread": {"info": 1, "warning": 0, "alert": 0, "total": 1}, + } + } + } ) ) tool = self._get_tool() @@ -833,9 +877,7 @@ class TestNotificationsToolRequests: return_value=_graphql_response({"notifications": {"list": []}}) ) tool = self._get_tool() - await tool( - action="list", list_type="ARCHIVE", importance="WARNING", offset=5, limit=10 - ) + await tool(action="list", list_type="ARCHIVE", importance="WARNING", offset=5, limit=10) body = _extract_request_body(route.calls.last.request) assert "ListNotifications" in body["query"] filt = body["variables"]["filter"] @@ -859,9 +901,13 @@ class TestNotificationsToolRequests: async def test_create_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"createNotification": { - "id": "n1", "title": "Test", "importance": "INFO", - }} + { + "createNotification": { + "id": "n1", + "title": "Test", + "importance": "INFO", + } + } ) ) tool = self._get_tool() @@ -870,21 +916,19 @@ class TestNotificationsToolRequests: title="Test", subject="Sub", description="Desc", - importance="normal", + importance="info", ) body = _extract_request_body(route.calls.last.request) assert "CreateNotification" in body["query"] inp = body["variables"]["input"] assert inp["title"] == "Test" assert inp["subject"] == "Sub" - assert inp["importance"] == "NORMAL" # uppercased from "normal" + assert inp["importance"] == "INFO" # uppercased from "info" @respx.mock async def test_archive_sends_id_variable(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response( - {"archiveNotification": {"id": "notif-1"}} - ) + return_value=_graphql_response({"archiveNotification": {"id": "notif-1"}}) ) tool = self._get_tool() await tool(action="archive", notification_id="notif-1") @@ -901,9 +945,7 @@ class TestNotificationsToolRequests: @respx.mock async def test_delete_sends_id_and_type(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response( - {"deleteNotification": {"unread": {"total": 0}}} - ) + return_value=_graphql_response({"deleteNotification": {"unread": {"total": 0}}}) ) tool = self._get_tool() await tool( @@ -920,9 +962,7 @@ class TestNotificationsToolRequests: @respx.mock async def test_archive_all_sends_importance_when_provided(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response( - {"archiveAll": {"archive": {"total": 1}}} - ) + return_value=_graphql_response({"archiveAll": {"archive": {"total": 1}}}) ) tool = self._get_tool() await tool(action="archive_all", importance="warning") @@ -941,9 +981,7 @@ class TestRCloneToolRequests: @staticmethod def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone" - ) + return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone") @respx.mock async def test_list_remotes_sends_correct_query(self) -> None: @@ -962,9 +1000,15 @@ class TestRCloneToolRequests: async def test_config_form_sends_provider_type(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"rclone": {"configForm": { - "id": "form1", "dataSchema": {}, "uiSchema": {}, - }}} + { + "rclone": { + "configForm": { + "id": "form1", + "dataSchema": {}, + "uiSchema": {}, + } + } + } ) ) tool = self._get_tool() @@ -977,9 +1021,15 @@ class TestRCloneToolRequests: async def test_create_remote_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"rclone": {"createRCloneRemote": { - "name": "my-s3", "type": "s3", "parameters": {}, - }}} + { + "rclone": { + "createRCloneRemote": { + "name": "my-s3", + "type": "s3", + "parameters": {}, + } + } + } ) ) tool = self._get_tool() @@ -1025,18 +1075,20 @@ class TestUsersToolRequests: @staticmethod def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.users", "register_users_tool", "unraid_users" - ) + return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users") @respx.mock async def test_me_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"me": { - "id": "u1", "name": "admin", - "description": "Admin", "roles": ["admin"], - }} + { + "me": { + "id": "u1", + "name": "admin", + "description": "Admin", + "roles": ["admin"], + } + } ) ) tool = self._get_tool() @@ -1061,9 +1113,7 @@ class TestKeysToolRequests: @respx.mock async def test_list_sends_correct_query(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response( - {"apiKeys": [{"id": "k1", "name": "my-key"}]} - ) + return_value=_graphql_response({"apiKeys": [{"id": "k1", "name": "my-key"}]}) ) tool = self._get_tool() result = await tool(action="list") @@ -1088,10 +1138,16 @@ class TestKeysToolRequests: async def test_create_sends_input_variables(self) -> None: route = respx.post(API_URL).mock( return_value=_graphql_response( - {"apiKey": {"create": { - "id": "k2", "name": "new-key", - "key": "secret", "roles": ["read"], - }}} + { + "apiKey": { + "create": { + "id": "k2", + "name": "new-key", + "key": "secret", + "roles": ["read"], + } + } + } ) ) tool = self._get_tool() @@ -1147,15 +1203,11 @@ class TestHealthToolRequests: @staticmethod def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.health", "register_health_tool", "unraid_health" - ) + return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health") @respx.mock async def test_test_connection_sends_online_query(self) -> None: - route = respx.post(API_URL).mock( - return_value=_graphql_response({"online": True}) - ) + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() result = await tool(action="test_connection") body = _extract_request_body(route.calls.last.request) @@ -1166,21 +1218,23 @@ class TestHealthToolRequests: @respx.mock async def test_check_sends_comprehensive_query(self) -> None: route = respx.post(API_URL).mock( - return_value=_graphql_response({ - "info": { - "machineId": "m1", - "time": 1234567890, - "versions": {"unraid": "7.0"}, - "os": {"uptime": 86400}, - }, - "array": {"state": "STARTED"}, - "notifications": { - "overview": {"unread": {"alert": 0, "warning": 1, "total": 3}}, - }, - "docker": { - "containers": [{"id": "c1", "state": "running", "status": "Up"}], - }, - }) + return_value=_graphql_response( + { + "info": { + "machineId": "m1", + "time": 1234567890, + "versions": {"unraid": "7.0"}, + "os": {"uptime": 86400}, + }, + "array": {"state": "STARTED"}, + "notifications": { + "overview": {"unread": {"alert": 0, "warning": 1, "total": 3}}, + }, + "docker": { + "containers": [{"id": "c1", "state": "running", "status": "Up"}], + }, + } + ) ) tool = self._get_tool() result = await tool(action="check") @@ -1191,9 +1245,7 @@ class TestHealthToolRequests: @respx.mock async def test_test_connection_measures_latency(self) -> None: - respx.post(API_URL).mock( - return_value=_graphql_response({"online": True}) - ) + respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() result = await tool(action="test_connection") assert "latency_ms" in result @@ -1202,18 +1254,21 @@ class TestHealthToolRequests: @respx.mock async def test_check_reports_warning_on_alerts(self) -> None: respx.post(API_URL).mock( - return_value=_graphql_response({ - "info": { - "machineId": "m1", "time": 0, - "versions": {"unraid": "7.0"}, - "os": {"uptime": 0}, - }, - "array": {"state": "STARTED"}, - "notifications": { - "overview": {"unread": {"alert": 3, "warning": 0, "total": 5}}, - }, - "docker": {"containers": []}, - }) + return_value=_graphql_response( + { + "info": { + "machineId": "m1", + "time": 0, + "versions": {"unraid": "7.0"}, + "os": {"uptime": 0}, + }, + "array": {"state": "STARTED"}, + "notifications": { + "overview": {"unread": {"alert": 3, "warning": 0, "total": 5}}, + }, + "docker": {"containers": []}, + } + ) ) tool = self._get_tool() result = await tool(action="check") @@ -1252,24 +1307,16 @@ class TestCrossCuttingConcerns: @respx.mock async def test_tool_error_from_http_layer_propagates(self) -> None: """When an HTTP error occurs, the ToolError bubbles up through the tool.""" - respx.post(API_URL).mock( - return_value=httpx.Response(500, text="Server Error") - ) - tool = make_tool_fn( - "unraid_mcp.tools.info", "register_info_tool", "unraid_info" - ) + respx.post(API_URL).mock(return_value=httpx.Response(500, text="Server Error")) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") with pytest.raises(ToolError, match="Unraid API returned HTTP 500"): await tool(action="online") @respx.mock async def test_network_error_propagates_through_tool(self) -> None: """When a network error occurs, the ToolError bubbles up through the tool.""" - respx.post(API_URL).mock( - side_effect=httpx.ConnectError("Connection refused") - ) - tool = make_tool_fn( - "unraid_mcp.tools.info", "register_info_tool", "unraid_info" - ) + respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused")) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") with pytest.raises(ToolError, match="Network error connecting to Unraid API"): await tool(action="online") @@ -1277,12 +1324,8 @@ class TestCrossCuttingConcerns: async def test_graphql_error_propagates_through_tool(self) -> None: """When a GraphQL error occurs, the ToolError bubbles up through the tool.""" respx.post(API_URL).mock( - return_value=_graphql_response( - errors=[{"message": "Permission denied"}] - ) - ) - tool = make_tool_fn( - "unraid_mcp.tools.info", "register_info_tool", "unraid_info" + return_value=_graphql_response(errors=[{"message": "Permission denied"}]) ) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") with pytest.raises(ToolError, match="Permission denied"): await tool(action="online") diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py index 4432bfe..09fe49b 100644 --- a/tests/safety/test_destructive_guards.py +++ b/tests/safety/test_destructive_guards.py @@ -88,8 +88,7 @@ class TestDestructiveActionRegistries: """Each tool's DESTRUCTIVE_ACTIONS must exactly match the audited set.""" info = KNOWN_DESTRUCTIVE[tool_key] assert info["runtime_set"] == info["actions"], ( - f"{tool_key}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, " - f"expected {info['actions']}" + f"{tool_key}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, expected {info['actions']}" ) @pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys())) @@ -131,7 +130,8 @@ class TestDestructiveActionRegistries: missing.extend( f"{tool_key}/{action_name}" for action_name in mutations - if ("delete" in action_name or "remove" in action_name) and action_name not in destructive + if ("delete" in action_name or "remove" in action_name) + and action_name not in destructive ) assert not missing, ( f"Mutations with 'delete'/'remove' not in DESTRUCTIVE_ACTIONS: {missing}" @@ -198,7 +198,11 @@ def _mock_keys_graphql() -> Generator[AsyncMock, None, None]: _TOOL_REGISTRY = { "docker": ("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"), "vm": ("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"), - "notifications": ("unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"), + "notifications": ( + "unraid_mcp.tools.notifications", + "register_notifications_tool", + "unraid_notifications", + ), "rclone": ("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone"), "keys": ("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys"), } @@ -275,7 +279,11 @@ class TestConfirmAllowsExecution: async def test_docker_update_all_with_confirm(self, _mock_docker_graphql: AsyncMock) -> None: _mock_docker_graphql.return_value = { - "docker": {"updateAllContainers": [{"id": "c1", "names": ["app"], "state": "running", "status": "Up"}]} + "docker": { + "updateAllContainers": [ + {"id": "c1", "names": ["app"], "state": "running", "status": "Up"} + ] + } } tool_fn = make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") result = await tool_fn(action="update_all", confirm=True) @@ -305,7 +313,12 @@ class TestConfirmAllowsExecution: assert result["success"] is True async def test_notifications_delete_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: - _mock_notif_graphql.return_value = {"deleteNotification": {"unread": {"total": 0}}} + _mock_notif_graphql.return_value = { + "deleteNotification": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } tool_fn = make_tool_fn( "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" ) @@ -317,8 +330,15 @@ class TestConfirmAllowsExecution: ) assert result["success"] is True - async def test_notifications_delete_archived_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: - _mock_notif_graphql.return_value = {"deleteArchivedNotifications": {"archive": {"total": 0}}} + async def test_notifications_delete_archived_with_confirm( + self, _mock_notif_graphql: AsyncMock + ) -> None: + _mock_notif_graphql.return_value = { + "deleteArchivedNotifications": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } tool_fn = make_tool_fn( "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" ) diff --git a/tests/schema/test_query_validation.py b/tests/schema/test_query_validation.py index c72aad6..c08ba40 100644 --- a/tests/schema/test_query_validation.py +++ b/tests/schema/test_query_validation.py @@ -153,10 +153,25 @@ class TestInfoQueries: from unraid_mcp.tools.info import QUERIES expected_actions = { - "overview", "array", "network", "registration", "connect", - "variables", "metrics", "services", "display", "config", - "online", "owner", "settings", "server", "servers", - "flash", "ups_devices", "ups_device", "ups_config", + "overview", + "array", + "network", + "registration", + "connect", + "variables", + "metrics", + "services", + "display", + "config", + "online", + "owner", + "settings", + "server", + "servers", + "flash", + "ups_devices", + "ups_device", + "ups_config", } assert set(QUERIES.keys()) == expected_actions @@ -314,8 +329,13 @@ class TestDockerQueries: from unraid_mcp.tools.docker import QUERIES expected = { - "list", "details", "logs", "networks", - "network_details", "port_conflicts", "check_updates", + "list", + "details", + "logs", + "networks", + "network_details", + "port_conflicts", + "check_updates", } assert set(QUERIES.keys()) == expected @@ -520,7 +540,19 @@ class TestNotificationMutations: def test_all_notification_mutations_covered(self, schema: GraphQLSchema) -> None: from unraid_mcp.tools.notifications import MUTATIONS - expected = {"create", "archive", "unread", "delete", "delete_archived", "archive_all"} + expected = { + "create", + "archive", + "unread", + "delete", + "delete_archived", + "archive_all", + "archive_many", + "create_unique", + "unarchive_many", + "unarchive_all", + "recalculate", + } assert set(MUTATIONS.keys()) == expected @@ -713,8 +745,7 @@ class TestSchemaCompleteness: failures.append(f"{tool_name}/MUTATIONS/{action}: {errors[0]}") assert not failures, ( - f"{len(failures)} of {total} operations failed validation:\n" - + "\n".join(failures) + f"{len(failures)} of {total} operations failed validation:\n" + "\n".join(failures) ) def test_schema_has_query_type(self, schema: GraphQLSchema) -> None: diff --git a/tests/test_array.py b/tests/test_array.py index 9837022..d7d786f 100644 --- a/tests/test_array.py +++ b/tests/test_array.py @@ -43,6 +43,7 @@ class TestArrayValidation: tool_fn = _make_tool() with pytest.raises(ToolError, match="correct is required"): await tool_fn(action="parity_start") + _mock_graphql.assert_not_called() class TestArrayActions: @@ -53,6 +54,8 @@ class TestArrayActions: assert result["success"] is True assert result["action"] == "parity_start" _mock_graphql.assert_called_once() + call_args = _mock_graphql.call_args + assert call_args[0][1] == {"correct": False} async def test_parity_start_with_correct(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"parityCheck": {"start": True}} diff --git a/tests/test_notifications.py b/tests/test_notifications.py index 890d5fd..1d3cf2c 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -90,7 +90,7 @@ class TestNotificationsActions: title="Test", subject="Test Subject", description="Test Desc", - importance="normal", + importance="info", ) assert result["success"] is True @@ -101,7 +101,12 @@ class TestNotificationsActions: assert result["success"] is True async def test_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"deleteNotification": {"unread": {"total": 0}}} + _mock_graphql.return_value = { + "deleteNotification": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } tool_fn = _make_tool() result = await tool_fn( action="delete", @@ -112,7 +117,12 @@ class TestNotificationsActions: assert result["success"] is True async def test_archive_all(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"archiveAll": {"archive": {"total": 1}}} + _mock_graphql.return_value = { + "archiveAll": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 1}, + } + } tool_fn = _make_tool() result = await tool_fn(action="archive_all") assert result["success"] is True @@ -138,7 +148,12 @@ class TestNotificationsActions: assert filter_var["offset"] == 5 async def test_delete_archived(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"deleteArchivedNotifications": {"archive": {"total": 0}}} + _mock_graphql.return_value = { + "deleteArchivedNotifications": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } tool_fn = _make_tool() result = await tool_fn(action="delete_archived", confirm=True) assert result["success"] is True @@ -165,8 +180,8 @@ class TestNotificationsCreateValidation: importance="invalid", ) - async def test_info_importance_rejected(self, _mock_graphql: AsyncMock) -> None: - """INFO is listed in old docstring examples but rejected by the validator.""" + async def test_normal_importance_rejected(self, _mock_graphql: AsyncMock) -> None: + """NORMAL is not a valid GraphQL NotificationImportance value (INFO/WARNING/ALERT are).""" tool_fn = _make_tool() with pytest.raises(ToolError, match="importance must be one of"): await tool_fn( @@ -174,7 +189,7 @@ class TestNotificationsCreateValidation: title="T", subject="S", description="D", - importance="info", + importance="normal", ) async def test_alert_importance_accepted(self, _mock_graphql: AsyncMock) -> None: @@ -193,7 +208,7 @@ class TestNotificationsCreateValidation: title="x" * 201, subject="S", description="D", - importance="normal", + importance="info", ) async def test_subject_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: @@ -204,7 +219,7 @@ class TestNotificationsCreateValidation: title="T", subject="x" * 501, description="D", - importance="normal", + importance="info", ) async def test_description_too_long_rejected(self, _mock_graphql: AsyncMock) -> None: @@ -215,17 +230,118 @@ class TestNotificationsCreateValidation: title="T", subject="S", description="x" * 2001, - importance="normal", + importance="info", ) async def test_title_at_max_accepted(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "NORMAL"}} + _mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "INFO"}} tool_fn = _make_tool() result = await tool_fn( action="create", title="x" * 200, subject="S", description="D", - importance="normal", + importance="info", ) assert result["success"] is True + + +class TestNewNotificationMutations: + async def test_archive_many_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "archiveNotifications": { + "unread": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + "archive": {"info": 2, "warning": 0, "alert": 0, "total": 2}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="archive_many", notification_ids=["n:1", "n:2"]) + assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1] == {"ids": ["n:1", "n:2"]} + + async def test_archive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="notification_ids"): + await tool_fn(action="archive_many") + + async def test_create_unique_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "notifyIfUnique": {"id": "n:1", "title": "Test", "importance": "INFO"} + } + tool_fn = _make_tool() + result = await tool_fn( + action="create_unique", + title="Test", + subject="Subj", + description="Desc", + importance="info", + ) + assert result["success"] is True + + async def test_create_unique_returns_none_when_duplicate( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"notifyIfUnique": None} + tool_fn = _make_tool() + result = await tool_fn( + action="create_unique", + title="T", + subject="S", + description="D", + importance="info", + ) + assert result["success"] is True + assert result["duplicate"] is True + + async def test_create_unique_requires_fields(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="requires title"): + await tool_fn(action="create_unique") + + async def test_unarchive_many_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "unarchiveNotifications": { + "unread": {"info": 2, "warning": 0, "alert": 0, "total": 2}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="unarchive_many", notification_ids=["n:1", "n:2"]) + assert result["success"] is True + + async def test_unarchive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + with pytest.raises(ToolError, match="notification_ids"): + await tool_fn(action="unarchive_many") + + async def test_unarchive_all_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "unarchiveAll": { + "unread": {"info": 5, "warning": 1, "alert": 0, "total": 6}, + "archive": {"info": 0, "warning": 0, "alert": 0, "total": 0}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="unarchive_all") + assert result["success"] is True + + async def test_unarchive_all_with_importance(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "unarchiveAll": {"unread": {"total": 1}, "archive": {"total": 0}} + } + tool_fn = _make_tool() + await tool_fn(action="unarchive_all", importance="WARNING") + call_args = _mock_graphql.call_args + assert call_args[0][1] == {"importance": "WARNING"} + + async def test_recalculate_success(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = { + "recalculateOverview": { + "unread": {"info": 3, "warning": 1, "alert": 0, "total": 4}, + "archive": {"info": 10, "warning": 0, "alert": 0, "total": 10}, + } + } + tool_fn = _make_tool() + result = await tool_fn(action="recalculate") + assert result["success"] is True diff --git a/unraid_mcp/config/logging.py b/unraid_mcp/config/logging.py index f0193d8..e291ba4 100644 --- a/unraid_mcp/config/logging.py +++ b/unraid_mcp/config/logging.py @@ -53,21 +53,22 @@ class OverwriteFileHandler(logging.FileHandler): ): try: base_path = Path(self.baseFilename) - if base_path.exists(): - file_size = base_path.stat().st_size - if file_size >= self.max_bytes: - # Close current stream - if self.stream: - self.stream.close() - - # Remove the old file and start fresh - if base_path.exists(): - base_path.unlink() - - # Reopen with truncate mode + file_size = base_path.stat().st_size if base_path.exists() else 0 + if file_size >= self.max_bytes: + old_stream = self.stream + self.stream = None + try: + old_stream.close() + base_path.unlink(missing_ok=True) self.stream = self._open() + except OSError: + # Recovery: attempt to reopen even if unlink failed + try: + self.stream = self._open() + except OSError: + self.stream = old_stream # Last resort: restore original - # Log a marker that the file was reset + if self.stream is not None: reset_record = logging.LogRecord( name="UnraidMCPServer.Logging", level=logging.INFO, @@ -184,27 +185,8 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None: fastmcp_logger.setLevel(numeric_log_level) - # Also configure the root logger to catch any other logs - root_logger = logging.getLogger() - root_logger.handlers.clear() - root_logger.propagate = False - - # Rich Console Handler for root logger - root_console_handler = RichHandler( - console=console, - show_time=True, - show_level=True, - show_path=False, - rich_tracebacks=True, - tracebacks_show_locals=False, - markup=True, - ) - root_console_handler.setLevel(numeric_log_level) - root_logger.addHandler(root_console_handler) - - # Reuse the shared file handler for root logger - root_logger.addHandler(_shared_file_handler) - root_logger.setLevel(numeric_log_level) + # Set root logger level to avoid suppressing library warnings entirely + logging.getLogger().setLevel(numeric_log_level) return fastmcp_logger diff --git a/unraid_mcp/config/settings.py b/unraid_mcp/config/settings.py index 1478199..90aa05d 100644 --- a/unraid_mcp/config/settings.py +++ b/unraid_mcp/config/settings.py @@ -36,8 +36,27 @@ for dotenv_path in dotenv_paths: UNRAID_API_URL = os.getenv("UNRAID_API_URL") UNRAID_API_KEY = os.getenv("UNRAID_API_KEY") + # Server Configuration -UNRAID_MCP_PORT = int(os.getenv("UNRAID_MCP_PORT", "6970")) +def _parse_port(env_var: str, default: int) -> int: + """Parse a port number from environment variable with validation.""" + raw = os.getenv(env_var, str(default)) + try: + port = int(raw) + except ValueError: + import sys + + print(f"FATAL: {env_var}={raw!r} is not a valid integer port number", file=sys.stderr) + sys.exit(1) + if not (1 <= port <= 65535): + import sys + + print(f"FATAL: {env_var}={port} outside valid port range 1-65535", file=sys.stderr) + sys.exit(1) + return port + + +UNRAID_MCP_PORT = _parse_port("UNRAID_MCP_PORT", 6970) UNRAID_MCP_HOST = os.getenv("UNRAID_MCP_HOST", "0.0.0.0") # noqa: S104 — intentional for Docker UNRAID_MCP_TRANSPORT = os.getenv("UNRAID_MCP_TRANSPORT", "streamable-http").lower() @@ -58,7 +77,7 @@ IS_DOCKER = Path("/.dockerenv").exists() LOGS_DIR = Path("/app/logs") if IS_DOCKER else PROJECT_ROOT / "logs" LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME -# Ensure logs directory exists; if creation fails, fall back to /tmp. +# Ensure logs directory exists; if creation fails, fall back to PROJECT_ROOT / ".cache" / "logs". try: LOGS_DIR.mkdir(parents=True, exist_ok=True) except OSError: @@ -97,9 +116,11 @@ def get_config_summary() -> dict[str, Any]: """ is_valid, missing = validate_required_config() + from ..core.utils import safe_display_url + return { "api_url_configured": bool(UNRAID_API_URL), - "api_url_preview": UNRAID_API_URL[:20] + "..." if UNRAID_API_URL else None, + "api_url_preview": safe_display_url(UNRAID_API_URL) if UNRAID_API_URL else None, "api_key_configured": bool(UNRAID_API_KEY), "server_host": UNRAID_MCP_HOST, "server_port": UNRAID_MCP_PORT, @@ -110,5 +131,7 @@ def get_config_summary() -> dict[str, Any]: "config_valid": is_valid, "missing_config": missing if not is_valid else None, } + + # Re-export application version from a single source of truth. VERSION = APP_VERSION diff --git a/unraid_mcp/core/client.py b/unraid_mcp/core/client.py index eb452d6..10bdb92 100644 --- a/unraid_mcp/core/client.py +++ b/unraid_mcp/core/client.py @@ -51,9 +51,7 @@ def _is_sensitive_key(key: str) -> bool: def redact_sensitive(obj: Any) -> Any: """Recursively redact sensitive values from nested dicts/lists.""" if isinstance(obj, dict): - return { - k: ("***" if _is_sensitive_key(k) else redact_sensitive(v)) for k, v in obj.items() - } + return {k: ("***" if _is_sensitive_key(k) else redact_sensitive(v)) for k, v in obj.items()} if isinstance(obj, list): return [redact_sensitive(item) for item in obj] return obj @@ -149,10 +147,16 @@ class _QueryCache: Keyed by a hash of (query, variables). Entries expire after _CACHE_TTL_SECONDS. Only caches responses for queries whose operation name is in _CACHEABLE_QUERY_PREFIXES. Mutation requests always bypass the cache. + + Thread-safe via asyncio.Lock. Bounded to _MAX_ENTRIES with FIFO eviction (oldest + expiry timestamp evicted first when the store is full). """ + _MAX_ENTRIES: Final[int] = 256 + def __init__(self) -> None: self._store: dict[str, tuple[float, dict[str, Any]]] = {} + self._lock: Final[asyncio.Lock] = asyncio.Lock() @staticmethod def _cache_key(query: str, variables: dict[str, Any] | None) -> str: @@ -170,26 +174,32 @@ class _QueryCache: return False return match.group(1) in _CACHEABLE_QUERY_PREFIXES - def get(self, query: str, variables: dict[str, Any] | None) -> dict[str, Any] | None: + async def get(self, query: str, variables: dict[str, Any] | None) -> dict[str, Any] | None: """Return cached result if present and not expired, else None.""" - key = self._cache_key(query, variables) - entry = self._store.get(key) - if entry is None: - return None - expires_at, data = entry - if time.monotonic() > expires_at: - del self._store[key] - return None - return data + async with self._lock: + key = self._cache_key(query, variables) + entry = self._store.get(key) + if entry is None: + return None + expires_at, data = entry + if time.monotonic() > expires_at: + del self._store[key] + return None + return data - def put(self, query: str, variables: dict[str, Any] | None, data: dict[str, Any]) -> None: - """Store a query result with TTL expiry.""" - key = self._cache_key(query, variables) - self._store[key] = (time.monotonic() + _CACHE_TTL_SECONDS, data) + async def put(self, query: str, variables: dict[str, Any] | None, data: dict[str, Any]) -> None: + """Store a query result with TTL expiry, evicting oldest entry if at capacity.""" + async with self._lock: + if len(self._store) >= self._MAX_ENTRIES: + oldest_key = min(self._store, key=lambda k: self._store[k][0]) + del self._store[oldest_key] + key = self._cache_key(query, variables) + self._store[key] = (time.monotonic() + _CACHE_TTL_SECONDS, data) - def invalidate_all(self) -> None: + async def invalidate_all(self) -> None: """Clear the entire cache (called after mutations).""" - self._store.clear() + async with self._lock: + self._store.clear() _query_cache = _QueryCache() @@ -310,10 +320,10 @@ async def make_graphql_request( if not UNRAID_API_KEY: raise ToolError("UNRAID_API_KEY not configured") - # Check TTL cache for stable read-only queries + # Check TTL cache — short-circuits rate limiter on hits is_mutation = query.lstrip().startswith("mutation") if not is_mutation and _query_cache.is_cacheable(query): - cached = _query_cache.get(query, variables) + cached = await _query_cache.get(query, variables) if cached is not None: logger.debug("Returning cached response for query") return cached @@ -399,9 +409,9 @@ async def make_graphql_request( # Invalidate cache on mutations; cache eligible query results if is_mutation: - _query_cache.invalidate_all() + await _query_cache.invalidate_all() elif _query_cache.is_cacheable(query): - _query_cache.put(query, variables, result) + await _query_cache.put(query, variables, result) return result diff --git a/unraid_mcp/main.py b/unraid_mcp/main.py index a2957d5..b7c8859 100644 --- a/unraid_mcp/main.py +++ b/unraid_mcp/main.py @@ -11,12 +11,19 @@ import sys async def shutdown_cleanup() -> None: """Cleanup resources on server shutdown.""" + try: + from .subscriptions.manager import subscription_manager + + await subscription_manager.stop_all() + except Exception as e: + print(f"Error stopping subscriptions during cleanup: {e}", file=sys.stderr) + try: from .core.client import close_http_client await close_http_client() except Exception as e: - print(f"Error during cleanup: {e}") + print(f"Error during cleanup: {e}", file=sys.stderr) def _run_shutdown_cleanup() -> None: diff --git a/unraid_mcp/server.py b/unraid_mcp/server.py index 91794af..1c13a88 100644 --- a/unraid_mcp/server.py +++ b/unraid_mcp/server.py @@ -10,8 +10,6 @@ from fastmcp import FastMCP from .config.logging import logger from .config.settings import ( - UNRAID_API_KEY, - UNRAID_API_URL, UNRAID_MCP_HOST, UNRAID_MCP_PORT, UNRAID_MCP_TRANSPORT, @@ -86,20 +84,10 @@ def run_server() -> None: ) sys.exit(1) - # Log configuration - if UNRAID_API_URL: - logger.info(f"UNRAID_API_URL loaded: {UNRAID_API_URL[:20]}...") - else: - logger.warning("UNRAID_API_URL not found in environment or .env file.") + # Log configuration (delegated to shared function) + from .config.logging import log_configuration_status - if UNRAID_API_KEY: - logger.info("UNRAID_API_KEY loaded: ****") - else: - logger.warning("UNRAID_API_KEY not found in environment or .env file.") - - logger.info(f"UNRAID_MCP_PORT set to: {UNRAID_MCP_PORT}") - logger.info(f"UNRAID_MCP_HOST set to: {UNRAID_MCP_HOST}") - logger.info(f"UNRAID_MCP_TRANSPORT set to: {UNRAID_MCP_TRANSPORT}") + log_configuration_status(logger) if UNRAID_VERIFY_SSL is False: logger.warning( diff --git a/unraid_mcp/subscriptions/diagnostics.py b/unraid_mcp/subscriptions/diagnostics.py index b9dbcad..5862455 100644 --- a/unraid_mcp/subscriptions/diagnostics.py +++ b/unraid_mcp/subscriptions/diagnostics.py @@ -22,7 +22,7 @@ from ..core.exceptions import ToolError from ..core.utils import safe_display_url from .manager import subscription_manager from .resources import ensure_subscriptions_started -from .utils import build_ws_ssl_context, build_ws_url +from .utils import _analyze_subscription_status, build_ws_ssl_context, build_ws_url _ALLOWED_SUBSCRIPTION_NAMES = frozenset( @@ -187,8 +187,10 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: # Get comprehensive status status = await subscription_manager.get_subscription_status() - # Initialize connection issues list with proper type - connection_issues: list[dict[str, Any]] = [] + # Analyze connection issues and error counts via the shared helper. + # This ensures "invalid_uri" and all other error states are counted + # consistently with the health tool's _diagnose_subscriptions path. + error_count, connection_issues = _analyze_subscription_status(status) # Add environment info with explicit typing diagnostic_info: dict[str, Any] = { @@ -210,7 +212,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: ), "active_count": len(subscription_manager.active_subscriptions), "with_data": len(subscription_manager.resource_data), - "in_error_state": 0, + "in_error_state": error_count, "connection_issues": connection_issues, }, } @@ -219,23 +221,6 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: with contextlib.suppress(ValueError): diagnostic_info["environment"]["websocket_url"] = build_ws_url() - # Analyze issues - for sub_name, sub_status in status.items(): - runtime = sub_status.get("runtime", {}) - connection_state = runtime.get("connection_state", "unknown") - - if connection_state in ["error", "auth_failed", "timeout", "max_retries_exceeded"]: - diagnostic_info["summary"]["in_error_state"] += 1 - - if runtime.get("last_error"): - connection_issues.append( - { - "subscription": sub_name, - "state": connection_state, - "error": runtime["last_error"], - } - ) - # Add troubleshooting recommendations recommendations: list[str] = [] diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index c58cd5a..dda682c 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -45,7 +45,7 @@ def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: elif ( key == "content" and isinstance(value, str) - and len(value.encode("utf-8", errors="replace")) > _MAX_RESOURCE_DATA_BYTES + and len(value) > _MAX_RESOURCE_DATA_BYTES # fast pre-check on char count ): lines = value.splitlines() original_line_count = len(lines) @@ -54,19 +54,15 @@ def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]: if len(lines) > _MAX_RESOURCE_DATA_LINES: lines = lines[-_MAX_RESOURCE_DATA_LINES:] - # Enforce byte cap while preserving whole-line boundaries where possible. truncated = "\n".join(lines) - truncated_bytes = truncated.encode("utf-8", errors="replace") - while len(lines) > 1 and len(truncated_bytes) > _MAX_RESOURCE_DATA_BYTES: - lines = lines[1:] - truncated = "\n".join(lines) - truncated_bytes = truncated.encode("utf-8", errors="replace") - - # Last resort: if a single line still exceeds cap, hard-cap bytes. - if len(truncated_bytes) > _MAX_RESOURCE_DATA_BYTES: - truncated = truncated_bytes[-_MAX_RESOURCE_DATA_BYTES :].decode( - "utf-8", errors="ignore" - ) + # Encode once and slice bytes instead of O(n²) line-trim loop + encoded = truncated.encode("utf-8", errors="replace") + if len(encoded) > _MAX_RESOURCE_DATA_BYTES: + truncated = encoded[-_MAX_RESOURCE_DATA_BYTES:].decode("utf-8", errors="ignore") + # Strip partial first line that may have been cut mid-character + nl_pos = truncated.find("\n") + if nl_pos != -1: + truncated = truncated[nl_pos + 1 :] logger.warning( f"[RESOURCE] Capped log content from {original_line_count} to " @@ -202,6 +198,16 @@ class SubscriptionManager: else: logger.warning(f"[SUBSCRIPTION:{subscription_name}] No active subscription to stop") + async def stop_all(self) -> None: + """Stop all active subscriptions (called during server shutdown).""" + subscription_names = list(self.active_subscriptions.keys()) + for name in subscription_names: + try: + await self.stop_subscription(name) + except Exception as e: + logger.error(f"[SHUTDOWN] Error stopping subscription '{name}': {e}", exc_info=True) + logger.info(f"[SHUTDOWN] Stopped {len(subscription_names)} subscription(s)") + async def _subscription_loop( self, subscription_name: str, query: str, variables: dict[str, Any] | None ) -> None: @@ -512,9 +518,11 @@ class SubscriptionManager: f"({connected_duration:.0f}s < {_STABLE_CONNECTION_SECONDS}s), " f"keeping retry counter at {self.reconnect_attempts.get(subscription_name, 0)}" ) - - # Calculate backoff delay - retry_delay = min(retry_delay * 1.5, max_retry_delay) + # Only escalate backoff when connection was NOT stable + retry_delay = min(retry_delay * 1.5, max_retry_delay) + else: + # No connection was established — escalate backoff + retry_delay = min(retry_delay * 1.5, max_retry_delay) logger.info( f"[WEBSOCKET:{subscription_name}] Reconnecting in {retry_delay:.1f} seconds..." ) diff --git a/unraid_mcp/subscriptions/resources.py b/unraid_mcp/subscriptions/resources.py index 850ac1c..cd780a5 100644 --- a/unraid_mcp/subscriptions/resources.py +++ b/unraid_mcp/subscriptions/resources.py @@ -4,8 +4,10 @@ This module defines MCP resources that bridge between the subscription manager and the MCP protocol, providing fallback queries when subscription data is unavailable. """ +import asyncio import json import os +from typing import Final import anyio from fastmcp import FastMCP @@ -16,22 +18,29 @@ from .manager import subscription_manager # Global flag to track subscription startup _subscriptions_started = False +_startup_lock: Final[asyncio.Lock] = asyncio.Lock() async def ensure_subscriptions_started() -> None: """Ensure subscriptions are started, called from async context.""" global _subscriptions_started + # Fast-path: skip lock if already started if _subscriptions_started: return - logger.info("[STARTUP] First async operation detected, starting subscriptions...") - try: - await autostart_subscriptions() - _subscriptions_started = True - logger.info("[STARTUP] Subscriptions started successfully") - except Exception as e: - logger.error(f"[STARTUP] Failed to start subscriptions: {e}", exc_info=True) + # Slow-path: acquire lock for initialization (double-checked locking) + async with _startup_lock: + if _subscriptions_started: + return + + logger.info("[STARTUP] First async operation detected, starting subscriptions...") + try: + await autostart_subscriptions() + _subscriptions_started = True + logger.info("[STARTUP] Subscriptions started successfully") + except Exception as e: + logger.error(f"[STARTUP] Failed to start subscriptions: {e}", exc_info=True) async def autostart_subscriptions() -> None: diff --git a/unraid_mcp/subscriptions/utils.py b/unraid_mcp/subscriptions/utils.py index 45c3634..83c1c4d 100644 --- a/unraid_mcp/subscriptions/utils.py +++ b/unraid_mcp/subscriptions/utils.py @@ -1,6 +1,7 @@ """Shared utilities for the subscription system.""" import ssl as _ssl +from typing import Any from ..config.settings import UNRAID_API_URL, UNRAID_VERIFY_SSL @@ -52,3 +53,37 @@ def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None: ctx.check_hostname = False ctx.verify_mode = _ssl.CERT_NONE return ctx + + +def _analyze_subscription_status( + status: dict[str, Any], +) -> tuple[int, list[dict[str, Any]]]: + """Analyze subscription status dict, returning error count and connection issues. + + This is the canonical, shared implementation used by both the health tool + and the subscription diagnostics tool. + + Args: + status: Dict of subscription name -> status info from get_subscription_status(). + + Returns: + Tuple of (error_count, connection_issues_list). + """ + error_count = 0 + connection_issues: list[dict[str, Any]] = [] + + for sub_name, sub_status in status.items(): + runtime = sub_status.get("runtime", {}) + conn_state = runtime.get("connection_state", "unknown") + if conn_state in ("error", "auth_failed", "timeout", "max_retries_exceeded", "invalid_uri"): + error_count += 1 + if runtime.get("last_error"): + connection_issues.append( + { + "subscription": sub_name, + "state": conn_state, + "error": runtime["last_error"], + } + ) + + return error_count, connection_issues diff --git a/unraid_mcp/tools/__init__.py b/unraid_mcp/tools/__init__.py index c863e40..3216355 100644 --- a/unraid_mcp/tools/__init__.py +++ b/unraid_mcp/tools/__init__.py @@ -1,14 +1,14 @@ """MCP tools organized by functional domain. -10 consolidated tools with ~90 actions total: +10 consolidated tools with 76 actions total: unraid_info - System information queries (19 actions) - unraid_array - Array operations and power management (12 actions) + unraid_array - Array operations and parity management (5 actions) unraid_storage - Storage, disks, and logs (6 actions) unraid_docker - Docker container management (15 actions) unraid_vm - Virtual machine management (9 actions) unraid_notifications - Notification management (9 actions) unraid_rclone - Cloud storage remotes (4 actions) - unraid_users - User management (8 actions) + unraid_users - User management (1 action) unraid_keys - API key management (5 actions) unraid_health - Health monitoring and diagnostics (3 actions) """ diff --git a/unraid_mcp/tools/array.py b/unraid_mcp/tools/array.py index b712849..12a863b 100644 --- a/unraid_mcp/tools/array.py +++ b/unraid_mcp/tools/array.py @@ -73,7 +73,7 @@ def register_array_tool(mcp: FastMCP) -> None: """Manage Unraid array parity checks. Actions: - parity_start - Start parity check (optional correct=True to fix errors) + parity_start - Start parity check (correct=True to fix errors, correct=False for read-only; required) parity_pause - Pause running parity check parity_resume - Resume paused parity check parity_cancel - Cancel running parity check diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index b125551..a4f0862 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -233,8 +233,8 @@ async def _resolve_container_id(container_id: str, *, strict: bool = False) -> s data = await make_graphql_request(list_query) containers = safe_get(data, "docker", "containers", default=[]) - # Short hex prefix: match by ID prefix before trying name matching - if _DOCKER_SHORT_ID_PATTERN.match(container_id): + # Short hex prefix: match by ID prefix before trying name matching (strict bypasses this) + if not strict and _DOCKER_SHORT_ID_PATTERN.match(container_id): id_lower = container_id.lower() matches: list[dict[str, Any]] = [] for c in containers: diff --git a/unraid_mcp/tools/health.py b/unraid_mcp/tools/health.py index e025171..c87a803 100644 --- a/unraid_mcp/tools/health.py +++ b/unraid_mcp/tools/health.py @@ -21,6 +21,7 @@ from ..config.settings import ( from ..core.client import make_graphql_request from ..core.exceptions import ToolError, tool_error_handler from ..core.utils import safe_display_url +from ..subscriptions.utils import _analyze_subscription_status ALL_ACTIONS = {"check", "test_connection", "diagnose"} @@ -218,42 +219,6 @@ async def _comprehensive_check() -> dict[str, Any]: } -def _analyze_subscription_status( - status: dict[str, Any], -) -> tuple[int, list[dict[str, Any]]]: - """Analyze subscription status dict, returning error count and connection issues. - - This is the canonical implementation of subscription status analysis. - TODO: subscriptions/diagnostics.py has a similar status-analysis pattern - in diagnose_subscriptions(). That module could import and call this helper - directly to avoid divergence. See Code-H05. - - Args: - status: Dict of subscription name -> status info from get_subscription_status(). - - Returns: - Tuple of (error_count, connection_issues_list). - """ - error_count = 0 - connection_issues: list[dict[str, Any]] = [] - - for sub_name, sub_status in status.items(): - runtime = sub_status.get("runtime", {}) - conn_state = runtime.get("connection_state", "unknown") - if conn_state in ("error", "auth_failed", "timeout", "max_retries_exceeded"): - error_count += 1 - if runtime.get("last_error"): - connection_issues.append( - { - "subscription": sub_name, - "state": conn_state, - "error": runtime["last_error"], - } - ) - - return error_count, connection_issues - - async def _diagnose_subscriptions() -> dict[str, Any]: """Import and run subscription diagnostics.""" try: diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index 65dfeae..a761aa5 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -114,10 +114,14 @@ def register_keys_tool(mcp: FastMCP) -> None: if permissions is not None: input_data["permissions"] = permissions data = await make_graphql_request(MUTATIONS["create"], {"input": input_data}) - return { - "success": True, - "key": (data.get("apiKey") or {}).get("create", {}), - } + created_key = (data.get("apiKey") or {}).get("create") + if not created_key: + return { + "success": False, + "key": {}, + "message": "API key creation failed: no data returned from server", + } + return {"success": True, "key": created_key} if action == "update": if not key_id: @@ -128,10 +132,14 @@ def register_keys_tool(mcp: FastMCP) -> None: if roles is not None: input_data["roles"] = roles data = await make_graphql_request(MUTATIONS["update"], {"input": input_data}) - return { - "success": True, - "key": (data.get("apiKey") or {}).get("update", {}), - } + updated_key = (data.get("apiKey") or {}).get("update") + if not updated_key: + return { + "success": False, + "key": {}, + "message": "API key update failed: no data returned from server", + } + return {"success": True, "key": updated_key} if action == "delete": if not key_id: diff --git a/unraid_mcp/tools/notifications.py b/unraid_mcp/tools/notifications.py index a40eedb..e74b477 100644 --- a/unraid_mcp/tools/notifications.py +++ b/unraid_mcp/tools/notifications.py @@ -50,34 +50,80 @@ MUTATIONS: dict[str, str] = { """, "archive": """ mutation ArchiveNotification($id: PrefixedID!) { - archiveNotification(id: $id) + archiveNotification(id: $id) { id title importance } } """, "unread": """ mutation UnreadNotification($id: PrefixedID!) { - unreadNotification(id: $id) + unreadNotification(id: $id) { id title importance } } """, "delete": """ mutation DeleteNotification($id: PrefixedID!, $type: NotificationType!) { - deleteNotification(id: $id, type: $type) + deleteNotification(id: $id, type: $type) { + unread { info warning alert total } + archive { info warning alert total } + } } """, "delete_archived": """ mutation DeleteArchivedNotifications { - deleteArchivedNotifications + deleteArchivedNotifications { + unread { info warning alert total } + archive { info warning alert total } + } } """, "archive_all": """ mutation ArchiveAllNotifications($importance: NotificationImportance) { - archiveAll(importance: $importance) + archiveAll(importance: $importance) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "archive_many": """ + mutation ArchiveNotifications($ids: [PrefixedID!]!) { + archiveNotifications(ids: $ids) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "create_unique": """ + mutation NotifyIfUnique($input: NotificationData!) { + notifyIfUnique(input: $input) { id title importance } + } + """, + "unarchive_many": """ + mutation UnarchiveNotifications($ids: [PrefixedID!]!) { + unarchiveNotifications(ids: $ids) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "unarchive_all": """ + mutation UnarchiveAll($importance: NotificationImportance) { + unarchiveAll(importance: $importance) { + unread { info warning alert total } + archive { info warning alert total } + } + } + """, + "recalculate": """ + mutation RecalculateOverview { + recalculateOverview { + unread { info warning alert total } + archive { info warning alert total } + } } """, } DESTRUCTIVE_ACTIONS = {"delete", "delete_archived"} ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) -_VALID_IMPORTANCE = {"ALERT", "WARNING", "NORMAL"} +_VALID_IMPORTANCE = {"ALERT", "WARNING", "INFO"} NOTIFICATION_ACTIONS = Literal[ "overview", @@ -89,6 +135,11 @@ NOTIFICATION_ACTIONS = Literal[ "delete", "delete_archived", "archive_all", + "archive_many", + "create_unique", + "unarchive_many", + "unarchive_all", + "recalculate", ] if set(get_args(NOTIFICATION_ACTIONS)) != ALL_ACTIONS: @@ -108,6 +159,7 @@ def register_notifications_tool(mcp: FastMCP) -> None: action: NOTIFICATION_ACTIONS, confirm: bool = False, notification_id: str | None = None, + notification_ids: list[str] | None = None, notification_type: str | None = None, importance: str | None = None, offset: int = 0, @@ -129,6 +181,11 @@ def register_notifications_tool(mcp: FastMCP) -> None: delete - Delete a notification (requires notification_id, notification_type, confirm=True) delete_archived - Delete all archived notifications (requires confirm=True) archive_all - Archive all notifications (optional importance filter) + archive_many - Archive multiple notifications by ID (requires notification_ids) + create_unique - Create notification only if no equivalent unread exists (requires title, subject, description, importance) + unarchive_many - Move notifications back to unread (requires notification_ids) + unarchive_all - Move all archived notifications to unread (optional importance filter) + recalculate - Recompute overview counts from disk """ if action not in ALL_ACTIONS: raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") @@ -212,6 +269,55 @@ def register_notifications_tool(mcp: FastMCP) -> None: data = await make_graphql_request(MUTATIONS["archive_all"], variables) return {"success": True, "action": "archive_all", "data": data} + if action == "archive_many": + if not notification_ids: + raise ToolError("notification_ids is required for 'archive_many' action") + data = await make_graphql_request( + MUTATIONS["archive_many"], {"ids": notification_ids} + ) + return {"success": True, "action": "archive_many", "data": data} + + if action == "create_unique": + if title is None or subject is None or description is None or importance is None: + raise ToolError( + "create_unique requires title, subject, description, and importance" + ) + if importance.upper() not in _VALID_IMPORTANCE: + raise ToolError( + f"importance must be one of: {', '.join(sorted(_VALID_IMPORTANCE))}. " + f"Got: '{importance}'" + ) + input_data = { + "title": title, + "subject": subject, + "description": description, + "importance": importance.upper(), + } + data = await make_graphql_request(MUTATIONS["create_unique"], {"input": input_data}) + notification = data.get("notifyIfUnique") + if notification is None: + return {"success": True, "duplicate": True, "data": None} + return {"success": True, "duplicate": False, "data": notification} + + if action == "unarchive_many": + if not notification_ids: + raise ToolError("notification_ids is required for 'unarchive_many' action") + data = await make_graphql_request( + MUTATIONS["unarchive_many"], {"ids": notification_ids} + ) + return {"success": True, "action": "unarchive_many", "data": data} + + if action == "unarchive_all": + vars_: dict[str, Any] | None = None + if importance: + vars_ = {"importance": importance.upper()} + data = await make_graphql_request(MUTATIONS["unarchive_all"], vars_) + return {"success": True, "action": "unarchive_all", "data": data} + + if action == "recalculate": + data = await make_graphql_request(MUTATIONS["recalculate"]) + return {"success": True, "action": "recalculate", "data": data} + raise ToolError(f"Unhandled action '{action}' — this is a bug") logger.info("Notifications tool registered successfully") diff --git a/unraid_mcp/tools/virtualization.py b/unraid_mcp/tools/virtualization.py index 89166b5..f54baf7 100644 --- a/unraid_mcp/tools/virtualization.py +++ b/unraid_mcp/tools/virtualization.py @@ -114,56 +114,42 @@ def register_vm_tool(mcp: FastMCP) -> None: raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") with tool_error_handler("vm", action, logger): - try: - logger.info(f"Executing unraid_vm action={action}") + logger.info(f"Executing unraid_vm action={action}") - if action == "list": - data = await make_graphql_request(QUERIES["list"]) - if data.get("vms"): - vms = data["vms"].get("domains") or data["vms"].get("domain") or [] - if isinstance(vms, dict): - vms = [vms] - return {"vms": vms} - return {"vms": []} - - if action == "details": - data = await make_graphql_request(QUERIES["details"]) - if not data.get("vms"): - raise ToolError("No VM data returned from server") + if action == "list": + data = await make_graphql_request(QUERIES["list"]) + if data.get("vms"): vms = data["vms"].get("domains") or data["vms"].get("domain") or [] if isinstance(vms, dict): vms = [vms] - for vm in vms: - if ( - vm.get("uuid") == vm_id - or vm.get("id") == vm_id - or vm.get("name") == vm_id - ): - return dict(vm) - available = [f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms] - raise ToolError(f"VM '{vm_id}' not found. Available: {', '.join(available)}") + return {"vms": vms} + return {"vms": []} - # Mutations - if action in MUTATIONS: - data = await make_graphql_request(MUTATIONS[action], {"id": vm_id}) - field = _MUTATION_FIELDS.get(action, action) - if data.get("vm") and field in data["vm"]: - return { - "success": data["vm"][field], - "action": action, - "vm_id": vm_id, - } - raise ToolError(f"Failed to {action} VM or unexpected response") + if action == "details": + data = await make_graphql_request(QUERIES["details"]) + if not data.get("vms"): + raise ToolError("No VM data returned from server") + vms = data["vms"].get("domains") or data["vms"].get("domain") or [] + if isinstance(vms, dict): + vms = [vms] + for vm in vms: + if vm.get("uuid") == vm_id or vm.get("id") == vm_id or vm.get("name") == vm_id: + return dict(vm) + available = [f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms] + raise ToolError(f"VM '{vm_id}' not found. Available: {', '.join(available)}") - raise ToolError(f"Unhandled action '{action}' — this is a bug") + # Mutations + if action in MUTATIONS: + data = await make_graphql_request(MUTATIONS[action], {"id": vm_id}) + field = _MUTATION_FIELDS.get(action, action) + if data.get("vm") and field in data["vm"]: + return { + "success": data["vm"][field], + "action": action, + "vm_id": vm_id, + } + raise ToolError(f"Failed to {action} VM or unexpected response") - except ToolError: - raise - except Exception as e: - if "VMs are not available" in str(e): - raise ToolError( - "VMs not available on this server. Check VM support is enabled." - ) from e - raise + raise ToolError(f"Unhandled action '{action}' — this is a bug") logger.info("VM tool registered successfully") diff --git a/uv.lock b/uv.lock index 313cdc1..b141fa9 100644 --- a/uv.lock +++ b/uv.lock @@ -1729,7 +1729,7 @@ wheels = [ [[package]] name = "unraid-mcp" -version = "0.2.0" +version = "0.2.1" source = { editable = "." } dependencies = [ { name = "fastapi" }, From bdb215536625b7d4bb72bdba40980f6ff07ef498 Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Fri, 13 Mar 2026 02:40:43 -0400 Subject: [PATCH 08/34] fix: update TestQueryCache tests to await async cache methods _QueryCache.get/put/invalidate_all are async (use asyncio.Lock internally). Updated 6 sync test methods to async def with proper await calls so they test the actual async interface rather than calling unawaited coroutines. --- tests/test_client.py | 40 ++++++++++++++++++++-------------------- uv.lock | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index 904409c..db9629f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -556,42 +556,42 @@ class TestRateLimiter: class TestQueryCache: """Unit tests for the TTL query cache.""" - def test_miss_on_empty_cache(self) -> None: + async def test_miss_on_empty_cache(self) -> None: cache = _QueryCache() - assert cache.get("{ info }", None) is None + assert await cache.get("{ info }", None) is None - def test_put_and_get_hit(self) -> None: + async def test_put_and_get_hit(self) -> None: cache = _QueryCache() data = {"result": "ok"} - cache.put("GetNetworkConfig { }", None, data) - result = cache.get("GetNetworkConfig { }", None) + await cache.put("GetNetworkConfig { }", None, data) + result = await cache.get("GetNetworkConfig { }", None) assert result == data - def test_expired_entry_returns_none(self) -> None: + async def test_expired_entry_returns_none(self) -> None: cache = _QueryCache() data = {"result": "ok"} - cache.put("GetNetworkConfig { }", None, data) + await cache.put("GetNetworkConfig { }", None, data) # Manually expire the entry key = cache._cache_key("GetNetworkConfig { }", None) cache._store[key] = (time.monotonic() - 1.0, data) # expired 1 sec ago - assert cache.get("GetNetworkConfig { }", None) is None + assert await cache.get("GetNetworkConfig { }", None) is None - def test_invalidate_all_clears_store(self) -> None: + async def test_invalidate_all_clears_store(self) -> None: cache = _QueryCache() - cache.put("GetNetworkConfig { }", None, {"x": 1}) - cache.put("GetOwner { }", None, {"y": 2}) + await cache.put("GetNetworkConfig { }", None, {"x": 1}) + await cache.put("GetOwner { }", None, {"y": 2}) assert len(cache._store) == 2 - cache.invalidate_all() + await cache.invalidate_all() assert len(cache._store) == 0 - def test_variables_affect_cache_key(self) -> None: + async def test_variables_affect_cache_key(self) -> None: """Different variables produce different cache keys.""" cache = _QueryCache() q = "GetNetworkConfig($id: ID!) { network(id: $id) { name } }" - cache.put(q, {"id": "1"}, {"name": "eth0"}) - cache.put(q, {"id": "2"}, {"name": "eth1"}) - assert cache.get(q, {"id": "1"}) == {"name": "eth0"} - assert cache.get(q, {"id": "2"}) == {"name": "eth1"} + await cache.put(q, {"id": "1"}, {"name": "eth0"}) + await cache.put(q, {"id": "2"}, {"name": "eth1"}) + assert await cache.get(q, {"id": "1"}) == {"name": "eth0"} + assert await cache.get(q, {"id": "2"}) == {"name": "eth1"} def test_is_cacheable_returns_true_for_known_prefixes(self) -> None: assert _QueryCache.is_cacheable("GetNetworkConfig { ... }") is True @@ -619,14 +619,14 @@ class TestQueryCache: """Anonymous 'query { ... }' has no operation name — must not be cached.""" assert _QueryCache.is_cacheable("query { network { name } }") is False - def test_expired_entry_removed_from_store(self) -> None: + async def test_expired_entry_removed_from_store(self) -> None: """Accessing an expired entry should remove it from the internal store.""" cache = _QueryCache() - cache.put("GetOwner { }", None, {"owner": "root"}) + await cache.put("GetOwner { }", None, {"owner": "root"}) key = cache._cache_key("GetOwner { }", None) cache._store[key] = (time.monotonic() - 1.0, {"owner": "root"}) assert key in cache._store - cache.get("GetOwner { }", None) # triggers deletion + await cache.get("GetOwner { }", None) # triggers deletion assert key not in cache._store diff --git a/uv.lock b/uv.lock index b141fa9..0172c95 100644 --- a/uv.lock +++ b/uv.lock @@ -1729,7 +1729,7 @@ wheels = [ [[package]] name = "unraid-mcp" -version = "0.2.1" +version = "0.3.0" source = { editable = "." } dependencies = [ { name = "fastapi" }, From ac5639301c588e17cf4434db02eb1cb3d2041816 Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Fri, 13 Mar 2026 02:44:26 -0400 Subject: [PATCH 09/34] fix: split subscription_lock, fix safe_get None semantics, validate notification enums P-01: Replace single subscription_lock with two fine-grained locks: - _task_lock guards active_subscriptions (task lifecycle operations) - _data_lock guards resource_data (WebSocket message writes and reads) Eliminates serialization between WebSocket updates and tool reads. CQ-05: safe_get now preserves explicit None at terminal key. Uses sentinel _MISSING to distinguish "key absent" (returns default) from "key=null" (returns None). Fixes conflation that masked intentional null values from the Unraid API. SEC-M04: Validate list_type, importance, and notification_type against known enums before dispatching to GraphQL. Prevents wasting rate-limited requests on invalid values and avoids leaking schema details in errors. --- scripts/test-tools.sh | 742 ++++++++++++++++++++++++++++ unraid_mcp/core/utils.py | 22 +- unraid_mcp/server.py | 2 + unraid_mcp/subscriptions/manager.py | 20 +- unraid_mcp/tools/notifications.py | 22 + 5 files changed, 794 insertions(+), 14 deletions(-) create mode 100755 scripts/test-tools.sh diff --git a/scripts/test-tools.sh b/scripts/test-tools.sh new file mode 100755 index 0000000..4cd2f59 --- /dev/null +++ b/scripts/test-tools.sh @@ -0,0 +1,742 @@ +#!/usr/bin/env bash +# ============================================================================= +# test-tools.sh — Integration smoke-test for unraid-mcp MCP server tools +# +# Exercises every non-destructive action across all 10 tools using mcporter. +# The server is launched ad-hoc via mcporter's --stdio flag so no persistent +# process or registered server entry is required. +# +# Usage: +# ./scripts/test-tools.sh [--timeout-ms N] [--parallel] [--verbose] +# +# Options: +# --timeout-ms N Per-call timeout in milliseconds (default: 25000) +# --parallel Run independent test groups in parallel (default: off) +# --verbose Print raw mcporter output for each call +# +# Exit codes: +# 0 — all tests passed or skipped +# 1 — one or more tests failed +# 2 — prerequisite check failed (mcporter, uv, server startup) +# ============================================================================= + +set -Eeuo pipefail +shopt -s inherit_errexit + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- +readonly SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)" +readonly PROJECT_DIR="$(cd -- "${SCRIPT_DIR}/.." && pwd -P)" +readonly SCRIPT_NAME="$(basename -- "${BASH_SOURCE[0]}")" +readonly TS_START="$(date +%s%N)" # nanosecond epoch +readonly LOG_FILE="${TMPDIR:-/tmp}/${SCRIPT_NAME%.sh}.$(date +%Y%m%d-%H%M%S).log" + +# Colours (disabled automatically when stdout is not a terminal) +if [[ -t 1 ]]; then + C_RESET='\033[0m' + C_BOLD='\033[1m' + C_GREEN='\033[0;32m' + C_RED='\033[0;31m' + C_YELLOW='\033[0;33m' + C_CYAN='\033[0;36m' + C_DIM='\033[2m' +else + C_RESET='' C_BOLD='' C_GREEN='' C_RED='' C_YELLOW='' C_CYAN='' C_DIM='' +fi + +# --------------------------------------------------------------------------- +# Defaults (overridable via flags) +# --------------------------------------------------------------------------- +CALL_TIMEOUT_MS=25000 +USE_PARALLEL=false +VERBOSE=false + +# --------------------------------------------------------------------------- +# Counters (updated by run_test / skip_test) +# --------------------------------------------------------------------------- +PASS_COUNT=0 +FAIL_COUNT=0 +SKIP_COUNT=0 +declare -a FAIL_NAMES=() + +# --------------------------------------------------------------------------- +# Argument parsing +# --------------------------------------------------------------------------- +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --timeout-ms) + CALL_TIMEOUT_MS="${2:?--timeout-ms requires a value}" + shift 2 + ;; + --parallel) + USE_PARALLEL=true + shift + ;; + --verbose) + VERBOSE=true + shift + ;; + -h|--help) + printf 'Usage: %s [--timeout-ms N] [--parallel] [--verbose]\n' "${SCRIPT_NAME}" + exit 0 + ;; + *) + printf '[ERROR] Unknown argument: %s\n' "$1" >&2 + exit 2 + ;; + esac + done +} + +# --------------------------------------------------------------------------- +# Logging helpers +# --------------------------------------------------------------------------- +log_info() { printf "${C_CYAN}[INFO]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}"; } +log_warn() { printf "${C_YELLOW}[WARN]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}"; } +log_error() { printf "${C_RED}[ERROR]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}" >&2; } + +elapsed_ms() { + local now + now="$(date +%s%N)" + printf '%d' "$(( (now - TS_START) / 1000000 ))" +} + +# --------------------------------------------------------------------------- +# Cleanup trap +# --------------------------------------------------------------------------- +cleanup() { + local rc=$? + if [[ $rc -ne 0 ]]; then + log_warn "Script exited with rc=${rc}. Log: ${LOG_FILE}" + fi +} +trap cleanup EXIT + +# --------------------------------------------------------------------------- +# Prerequisite checks +# --------------------------------------------------------------------------- +check_prerequisites() { + local missing=false + + if ! command -v mcporter &>/dev/null; then + log_error "mcporter not found in PATH. Install it and re-run." + missing=true + fi + + if ! command -v uv &>/dev/null; then + log_error "uv not found in PATH. Install it and re-run." + missing=true + fi + + if ! command -v python3 &>/dev/null; then + log_error "python3 not found in PATH." + missing=true + fi + + if [[ ! -f "${PROJECT_DIR}/pyproject.toml" ]]; then + log_error "pyproject.toml not found at ${PROJECT_DIR}. Wrong directory?" + missing=true + fi + + if [[ "${missing}" == true ]]; then + return 2 + fi +} + +# --------------------------------------------------------------------------- +# Server startup smoke-test +# Launches the stdio server and calls unraid_health action=check. +# Returns 0 if the server responds (even with an API error — that still +# means the Python process started cleanly), non-zero on import failure. +# --------------------------------------------------------------------------- +smoke_test_server() { + log_info "Smoke-testing server startup..." + + local output + output="$( + mcporter call \ + --stdio "uv run unraid-mcp-server" \ + --cwd "${PROJECT_DIR}" \ + --name "unraid-smoke" \ + --tool unraid_health \ + --args '{"action":"check"}' \ + --timeout 30000 \ + --output json \ + 2>&1 + )" || true + + # If mcporter returns the offline error the server failed to import/start + if printf '%s' "${output}" | grep -q '"kind": "offline"'; then + log_error "Server failed to start. Output:" + printf '%s\n' "${output}" >&2 + log_error "Common causes:" + log_error " • Missing module: check 'uv run unraid-mcp-server' locally" + log_error " • server.py has an import for a file that doesn't exist yet" + log_error " • Environment variable UNRAID_API_URL or UNRAID_API_KEY missing" + return 2 + fi + + log_info "Server started successfully (health response received)." + return 0 +} + +# --------------------------------------------------------------------------- +# mcporter call wrapper +# Usage: mcporter_call +# Writes the mcporter JSON output to stdout. +# Returns the mcporter exit code. +# --------------------------------------------------------------------------- +mcporter_call() { + local tool_name="${1:?tool_name required}" + local args_json="${2:?args_json required}" + + mcporter call \ + --stdio "uv run unraid-mcp-server" \ + --cwd "${PROJECT_DIR}" \ + --name "unraid" \ + --tool "${tool_name}" \ + --args "${args_json}" \ + --timeout "${CALL_TIMEOUT_MS}" \ + --output json \ + 2>&1 +} + +# --------------------------------------------------------------------------- +# Test runner +# Usage: run_test