diff --git a/.claude-plugin/README.md b/.claude-plugin/README.md index f89b879..65d7fb1 100644 --- a/.claude-plugin/README.md +++ b/.claude-plugin/README.md @@ -31,32 +31,34 @@ This directory contains the Claude Code marketplace configuration for the Unraid Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring. **Features:** -- 11 tools with ~104 actions (queries and mutations) -- Real-time system metrics +- 1 consolidated `unraid` tool with ~108 actions across 15 domains +- Real-time live subscriptions (CPU, memory, logs, array state, UPS) - Disk health and temperature monitoring - Docker container management - VM status and control - Log file access - Network share information - Notification management +- Plugin, rclone, API key, and OIDC management -**Version:** 0.2.0 +**Version:** 1.1.2 **Category:** Infrastructure **Tags:** unraid, monitoring, homelab, graphql, docker, virtualization ## Configuration -After installation, configure your Unraid server credentials: +After installation, run setup to configure credentials interactively: -```bash -export UNRAID_API_URL="https://your-unraid-server/graphql" -export UNRAID_API_KEY="your-api-key" +```python +unraid(action="health", subaction="setup") ``` +Credentials are stored at `~/.unraid-mcp/.env` automatically. + **Getting an API Key:** 1. Open Unraid WebUI 2. Go to Settings → Management Access → API Keys -3. Click "Create" and select "Viewer" role +3. Click "Create" and select "Viewer" role (or appropriate roles for mutations) 4. 
Copy the generated API key ## Documentation diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index c814f03..ec9c534 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -1,12 +1,12 @@ { - "name": "jmagar-unraid-mcp", + "name": "unraid-mcp", "owner": { "name": "jmagar", "email": "jmagar@users.noreply.github.com" }, "metadata": { - "description": "Comprehensive Unraid server management and monitoring tools via GraphQL API", - "version": "0.2.0", + "description": "Comprehensive Unraid server management and monitoring via a single consolidated MCP tool (~108 actions across 15 domains)", + "version": "1.1.2", "homepage": "https://github.com/jmagar/unraid-mcp", "repository": "https://github.com/jmagar/unraid-mcp" }, @@ -14,8 +14,8 @@ { "name": "unraid", "source": "./", - "description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring", - "version": "0.2.0", + "description": "Query, monitor, and manage Unraid servers via GraphQL API — single `unraid` tool with action+subaction routing for array, disk, docker, VM, notifications, live metrics, and more", + "version": "1.1.2", "tags": ["unraid", "monitoring", "homelab", "graphql", "docker", "virtualization"], "category": "infrastructure" } diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index 3dc2822..5885bd7 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "unraid", - "description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring", - "version": "0.6.0", + "description": "Query, monitor, and manage Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring", + "version": "1.1.3", "author": { "name": "jmagar", "email": "jmagar@users.noreply.github.com" diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 
a5368a7..0000000 --- a/.dockerignore +++ /dev/null @@ -1,31 +0,0 @@ -Dockerfile -.dockerignore -.git -.gitignore -__pycache__ -*.pyc -*.pyo -*.pyd -.env -.env.local -.env.* -*.log -logs/ -*.db -*.sqlite3 -instance/ -.pytest_cache/ -.ty_cache/ -.venv/ -venv/ -env/ -.vscode/ -cline_docs/ -tests/ -docs/ -scripts/ -commands/ -.full-review/ -.claude-plugin/ -*.md -!README.md diff --git a/.env.example b/.env.example index 1dee38e..5ee55ab 100644 --- a/.env.example +++ b/.env.example @@ -8,7 +8,10 @@ UNRAID_API_KEY=your_unraid_api_key # MCP Server Settings # ------------------- -UNRAID_MCP_TRANSPORT=streamable-http # Options: streamable-http (recommended), sse (deprecated), stdio +# Default transport is stdio (for Claude Desktop / local use). +# Docker Compose overrides this to streamable-http automatically. +# Options: stdio (default), streamable-http, sse (deprecated) +UNRAID_MCP_TRANSPORT=stdio UNRAID_MCP_HOST=0.0.0.0 UNRAID_MCP_PORT=6970 @@ -34,4 +37,22 @@ UNRAID_MAX_RECONNECT_ATTEMPTS=10 # Optional: Custom log file path for subscription auto-start diagnostics # Defaults to standard log if not specified -# UNRAID_AUTOSTART_LOG_PATH=/custom/path/to/autostart.log \ No newline at end of file +# UNRAID_AUTOSTART_LOG_PATH=/custom/path/to/autostart.log + +# Credentials Directory Override (Optional) +# ----------------------------------------- +# Override the credentials directory (default: ~/.unraid-mcp/) +# UNRAID_CREDENTIALS_DIR=/custom/path/to/credentials + +# Authentication +# -------------- +# This server has NO built-in authentication. 
+# When running as HTTP (streamable-http transport), protect the endpoint with +# an external OAuth gateway or identity-aware proxy: +# +# Reverse proxy with auth: nginx + OAuth2-proxy, Caddy + forward auth +# Identity-aware proxy: Authelia, Authentik, Pomerium +# Network isolation: bind to 127.0.0.1, use VPN/Tailscale for access +# Firewall rules: restrict source IPs at the network layer +# +# stdio transport (default) is inherently local — no network exposure. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..d8e3dd1 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,77 @@ +name: CI + +on: + push: + branches: ["main", "feat/**", "fix/**"] + pull_request: + branches: ["main"] + +jobs: + lint: + name: Lint & Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.9.25" + - name: Install dependencies + run: uv sync --group dev + - name: Ruff check + run: uv run ruff check unraid_mcp/ tests/ + - name: Ruff format + run: uv run ruff format --check unraid_mcp/ tests/ + + typecheck: + name: Type Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.9.25" + - name: Install dependencies + run: uv sync --group dev + - name: ty check + run: uv run ty check unraid_mcp/ + + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.9.25" + - name: Install dependencies + run: uv sync --group dev + - name: Run tests with coverage (excluding integration/slow) + run: uv run pytest -m "not slow and not integration" --cov=unraid_mcp --cov-report=term-missing --tb=short -q + + version-sync: + name: Version Sync Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Check pyproject.toml and plugin.json versions match + run: | + TOML_VER=$(grep '^version = ' pyproject.toml | sed 's/version = 
"//;s/"//') + PLUGIN_VER=$(python3 -c "import json; print(json.load(open('.claude-plugin/plugin.json'))['version'])") + echo "pyproject.toml: $TOML_VER" + echo "plugin.json: $PLUGIN_VER" + if [ "$TOML_VER" != "$PLUGIN_VER" ]; then + echo "ERROR: Version mismatch! Update .claude-plugin/plugin.json to match pyproject.toml" + exit 1 + fi + echo "Versions in sync: $TOML_VER" + + audit: + name: Security Audit + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v5 + with: + version: "0.9.25" + - name: Dependency audit + run: uv audit diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..cffc75c --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,158 @@ +# Changelog + +All notable changes to this project are documented here. + +## [1.1.3] - 2026-03-24 + +### Fixed +- **Docs accuracy**: `disk/logs` docs corrected to use `log_path`/`tail_lines` parameters (were `path`/`lines`) +- **Docs accuracy**: `rclone/create_remote` docs corrected to `provider_type`/`config_data` (were `type`/`fields`) +- **Docs accuracy**: `setting/update` docs corrected to `settings_input` parameter (was `settings`) +- **Docs accuracy**: `key/create` now documents `roles` as optional; `add_role`/`remove_role` corrected to `roles` (plural) +- **Docs accuracy**: `oidc/validate_session` now documents required `token` parameter +- **Docs accuracy**: `parity_start` quick-reference example now includes required `correct=False` +- **Docs accuracy**: `log_tail` README example now includes required `path="/var/log/syslog"` +- **Docs accuracy**: `live/parity_progress` added to event-driven subscriptions list in troubleshooting guide +- **Docs accuracy**: `live/array_state` wording softened — "may show connecting indefinitely" vs "will always show" +- **Markdown**: `endpoints.md` top-level heading moved before blockquote disclaimer (MD041) +- **Tests**: `test_resources.py` now uses `_get_resource()` helper instead of raw `mcp.providers[0]._components[...]` 
access; isolates FastMCP internals to one location + +--- + +## [1.1.2] - 2026-03-23 + +### Security +- **Path traversal**: Removed `/mnt/` from `_ALLOWED_LOG_PREFIXES` — was exposing all Unraid user shares to path-based reads +- **Path traversal**: Added early `..` detection for `disk/logs` and `live/log_tail` before any filesystem access; added `/boot/` prefix restriction for `flash_backup` source paths +- **Timing-safe auth**: `verify_token` now uses `hmac.compare_digest` instead of `==` to prevent timing oracle attacks on API key comparison +- **Traceback leak**: `include_traceback` in `ErrorHandlingMiddleware` is now gated on `DEBUG` log level; production deployments no longer expose stack traces + +### Fixed +- **Health check**: `_comprehensive_health_check` now re-raises `CredentialsNotConfiguredError` instead of swallowing it into a generic unhealthy status +- **UPS device query**: Removed non-existent `nominalPower` and `currentPower` fields from `ups_device` query — every call was failing against the live API +- **Stale credential bindings**: Subscription modules (`manager.py`, `snapshot.py`, `utils.py`, `diagnostics.py`) previously captured `UNRAID_API_KEY`/`UNRAID_API_URL` at import time; replaced with `_settings.ATTR` call-time access so `apply_runtime_config()` updates propagate correctly after credential elicitation + +### Added +- **CI pipeline**: `.github/workflows/ci.yml` with 5 jobs — lint (`ruff`), typecheck (`ty`), test (`pytest -m "not integration"`), version-sync check, and `uv audit` dependency scan +- **Coverage threshold**: `fail_under = 80` added to `[tool.coverage.report]` +- **Version sync check**: `scripts/validate-marketplace.sh` now verifies `pyproject.toml` and `plugin.json` versions match + +### Changed +- **Docs**: Updated `CLAUDE.md`, `README.md` to reflect 3 tools (1 primary + 2 diagnostic); corrected system domain count (19→18); fixed scripts comment +- **Docs**: `docs/AUTHENTICATION.md` H1 retitled to "Authentication Setup 
Guide" +- **Docs**: Added `UNRAID_CREDENTIALS_DIR` commented entry to `.env.example` +- Removed `from __future__ import annotations` from `snapshot.py` (caused TC002 false positives with FastMCP) +- Added `# noqa: ASYNC109` to `timeout` parameters in `_handle_live` and `unraid()` (valid suppressions) +- Fixed `start_array*` → `start_array` in tool docstring table (`start_array` is not in `_ARRAY_DESTRUCTIVE`) + +### Refactored +- **Path validation**: Extracted `_validate_path()` in `unraid.py` — consolidates traversal check, `normpath`, and prefix validation used by both `disk/logs` and `live/log_tail` into one place; eliminates duplication +- **WebSocket auth payload**: Extracted `build_connection_init()` in `subscriptions/utils.py` — removes 4 duplicate `connection_init` blocks from `snapshot.py` (×2), `manager.py`, and `diagnostics.py`; also fixes a bug in `diagnostics.py` where `x-api-key: None` was sent when no API key was configured +- Removed `_LIVE_ALLOWED_LOG_PREFIXES` alias — direct reference to `_ALLOWED_LOG_PREFIXES` +- Moved `import hmac` to module level in `server.py` (was inside `verify_token` hot path) + +--- + +## [1.1.1] - 2026-03-16 + +### Added +- **API key auth**: `Authorization: Bearer ` bearer token authentication via `ApiKeyVerifier` — machine-to-machine access without OAuth browser flow +- **MultiAuth**: When both Google OAuth and API key are configured, `MultiAuth` accepts either method +- **Google OAuth**: Full `GoogleProvider` integration — browser-based OAuth 2.0 flow with JWT session tokens; `UNRAID_MCP_JWT_SIGNING_KEY` for stable tokens across restarts +- **`fastmcp.json`**: Dev tooling configs for FastMCP + +### Fixed +- Auth test isolation: use `os.environ[k] = ""` instead of `delenv` to prevent dotenv re-injection between test reloads + +--- + +## [1.1.0] - 2026-03-16 + +### Breaking Changes +- **Tool consolidation**: 15 individual domain tools (`unraid_docker`, `unraid_vm`, etc.) 
merged into single `unraid` tool with `action` + `subaction` routing + - Old: `unraid_docker(action="list")` + - New: `unraid(action="docker", subaction="list")` + +### Added +- **`live` tool** (11 subactions): Real-time WebSocket subscription snapshots — `cpu`, `memory`, `cpu_telemetry`, `array_state`, `parity_progress`, `ups_status`, `notifications_overview`, `notification_feed`, `log_tail`, `owner`, `server_status` +- **`customization` tool** (5 subactions): `theme`, `public_theme`, `is_initial_setup`, `sso_enabled`, `set_theme` +- **`plugin` tool** (3 subactions): `list`, `add`, `remove` +- **`oidc` tool** (5 subactions): `providers`, `provider`, `configuration`, `public_providers`, `validate_session` +- **Persistent `SubscriptionManager`**: `unraid://live/*` MCP resources backed by long-lived WebSocket connections with auto-start and reconnection +- **`diagnose_subscriptions`** and **`test_subscription_query`** diagnostic tools +- `array`: Added `parity_history`, `start_array`, `stop_array`, `add_disk`, `remove_disk`, `mount_disk`, `unmount_disk`, `clear_disk_stats` +- `keys`: Added `add_role`, `remove_role` +- `settings`: Added `update_ssh` (confirm required) +- `stop_array` added to `_ARRAY_DESTRUCTIVE` +- `gate_destructive_action` helper in `core/guards.py` — centralized elicitation + confirm guard +- Full safety test suite: `TestNoGraphQLCallsWhenUnconfirmed` (zero-I/O guarantee for all 13 destructive actions) + +### Fixed +- Removed 29 actions confirmed absent from live API v4.29.2 via GraphQL introspection (Docker organizer mutations, `unassignedDevices`, `warningsAndAlerts`, etc.) 
+- `log_tail` path validated against allowlist before subscription start +- WebSocket auth uses `x-api-key` connectionParams format + +--- + +## [1.0.0] - 2026-03-14 through 2026-03-15 + +### Breaking Changes +- Credential storage moved to `~/.unraid-mcp/.env` (dir 700, file 600); all runtimes load from this path +- `unraid_health(action="setup")` is the only tool that triggers credential elicitation; all others propagate `CredentialsNotConfiguredError` + +### Added +- `CredentialsNotConfiguredError` sentinel — propagates cleanly through `tool_error_handler` with exact credential path in the error message +- `is_configured()` and `apply_runtime_config()` in `settings.py` for runtime credential injection +- `elicit_and_configure()` with `.env` persistence and confirmation before overwrite +- 28 GraphQL mutations across storage, docker, notifications, and new settings tool +- Comprehensive test suite expansion: schema validation (99 tests), HTTP layer (respx), property tests, safety audit, contract tests + +### Fixed +- Numerous PR review fixes across 50+ commits (CodeRabbit, ChatGPT-Codex review rounds) +- Shell scripts hardened against injection and null guards +- Notification enum validation, subscription lock split, safe_get semantics + +--- + +## [0.6.0] - 2026-03-15 + +### Added +- Subscription byte/line cap to prevent unbounded memory growth +- `asyncio.timeout` bounds on `subscribe_once` / `subscribe_collect` +- Partial auto-start for subscriptions (best-effort on startup) + +### Fixed +- WebSocket URL scheme handling (`ws://`/`wss://`) +- `flash_backup` path validation and smoke test assertions + +--- + +## [0.5.0] - 2026-03-15 + +*Tool expansion and live subscription foundation.* + +--- + +## [0.4.x] - 2026-03-13 through 2026-03-14 + +*Credential elicitation system, per-tool refactors, and mutation additions.* + +--- + +## [0.2.x] - 2026-02-15 through 2026-03-13 + +*Initial public release hardening: PR review cycles, test suite expansion, security fixes, 
plugin manifest.* + +--- + +## [0.1.0] - 2026-02-08 + +### Added +- Consolidated 26 tools into 10 tools with 90 actions +- FastMCP architecture migration with `uv` toolchain +- Docker Compose support with health checks +- WebSocket subscription infrastructure + +--- + +*Format: [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). Versioning: [Semantic Versioning](https://semver.org/).* diff --git a/CLAUDE.md b/CLAUDE.md index d182d6b..4e5e34b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -38,28 +38,26 @@ uv run ty check unraid_mcp/ uv run pytest ``` -### Docker Development -```bash -# Build the Docker image -docker build -t unraid-mcp-server . - -# Run with Docker Compose -docker compose up -d - -# View logs -docker compose logs -f unraid-mcp - -# Stop service -docker compose down -``` - ### Environment Setup -- Copy `.env.example` to `.env` and configure: - - `UNRAID_API_URL`: Unraid GraphQL endpoint (required) - - `UNRAID_API_KEY`: Unraid API key (required) - - `UNRAID_MCP_TRANSPORT`: Transport type (default: streamable-http) - - `UNRAID_MCP_PORT`: Server port (default: 6970) - - `UNRAID_MCP_HOST`: Server host (default: 0.0.0.0) +Copy `.env.example` to `.env` and configure: + +**Required:** +- `UNRAID_API_URL`: Unraid GraphQL endpoint +- `UNRAID_API_KEY`: Unraid API key + +**Server:** +- `UNRAID_MCP_LOG_LEVEL`: Log verbosity (default: INFO) +- `UNRAID_MCP_LOG_FILE`: Log filename in logs/ (default: unraid-mcp.log) + +**SSL/TLS:** +- `UNRAID_VERIFY_SSL`: SSL verification (default: true; set `false` for self-signed certs) + +**Subscriptions:** +- `UNRAID_AUTO_START_SUBSCRIPTIONS`: Auto-start live subscriptions on startup (default: true) +- `UNRAID_MAX_RECONNECT_ATTEMPTS`: WebSocket reconnect limit (default: 10) + +**Credentials override:** +- `UNRAID_CREDENTIALS_DIR`: Override the `~/.unraid-mcp/` credentials directory path ## Architecture @@ -68,10 +66,13 @@ docker compose down - **Entry Point**: `unraid_mcp/main.py` - Application entry point and startup logic - 
**Configuration**: `unraid_mcp/config/` - Settings management and logging configuration - **Core Infrastructure**: `unraid_mcp/core/` - GraphQL client, exceptions, and shared types + - `guards.py` — destructive action gating via MCP elicitation + - `utils.py` — shared helpers (`safe_get`, `safe_display_url`, path validation) + - `setup.py` — elicitation-based credential setup flow - **Subscriptions**: `unraid_mcp/subscriptions/` - Real-time WebSocket subscriptions and diagnostics - **Tools**: `unraid_mcp/tools/` - Domain-specific tool implementations - **GraphQL Client**: Uses httpx for async HTTP requests to Unraid API -- **Transport Layer**: Supports streamable-http (recommended), SSE (deprecated), and stdio +- **Version Helper**: `unraid_mcp/version.py` - Reads version from package metadata via importlib ### Key Design Patterns - **Consolidated Action Pattern**: Each tool uses `action: Literal[...]` parameter to expose multiple operations via a single MCP tool, reducing context window usage @@ -83,47 +84,56 @@ docker compose down - **Data Processing**: Tools return both human-readable summaries and detailed raw data - **Health Monitoring**: Comprehensive health check tool for system monitoring - **Real-time Subscriptions**: WebSocket-based live data streaming +- **Persistent Subscription Manager**: `live` action subactions use a shared `SubscriptionManager` + that maintains persistent WebSocket connections. Resources serve cached data via + `subscription_manager.get_resource_data(action)`. A "connecting" placeholder is returned + while the subscription starts — callers should retry in a moment. When + `UNRAID_AUTO_START_SUBSCRIPTIONS=false`, resources fall back to on-demand `subscribe_once`. -### Tool Categories (15 Tools, ~103 Actions) -1. **`unraid_info`** (18 actions): overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config -2. 
**`unraid_array`** (13 actions): parity_start, parity_pause, parity_resume, parity_cancel, parity_status, parity_history, start_array, stop_array, add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats -3. **`unraid_storage`** (6 actions): shares, disks, disk_details, log_files, logs, flash_backup -4. **`unraid_docker`** (7 actions): list, details, start, stop, restart, networks, network_details -5. **`unraid_vm`** (9 actions): list, details, start, stop, pause, resume, force_stop, reboot, reset -6. **`unraid_notifications`** (12 actions): overview, list, create, archive, unread, delete, delete_archived, archive_all, archive_many, unarchive_many, unarchive_all, recalculate -7. **`unraid_rclone`** (4 actions): list_remotes, config_form, create_remote, delete_remote -8. **`unraid_users`** (1 action): me -9. **`unraid_keys`** (7 actions): list, get, create, update, delete, add_role, remove_role -10. **`unraid_health`** (4 actions): check, test_connection, diagnose, setup -11. **`unraid_settings`** (2 actions): update, configure_ups -12. **`unraid_customization`** (5 actions): theme, public_theme, is_initial_setup, sso_enabled, set_theme -13. **`unraid_plugins`** (3 actions): list, add, remove -14. **`unraid_oidc`** (5 actions): providers, provider, configuration, public_providers, validate_session -15. **`unraid_live`** (11 actions): cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, notification_feed, log_tail, owner, server_status +### Tool Categories (3 Tools: 1 Primary + 2 Diagnostic) + +The server registers **3 MCP tools**: +- **`unraid`** — primary tool with `action` (domain) + `subaction` (operation) routing, 107 subactions. Call it as `unraid(action="docker", subaction="list")`. +- **`diagnose_subscriptions`** — inspect subscription connection states, errors, and WebSocket URLs. +- **`test_subscription_query`** — test a specific GraphQL subscription query (allowlisted fields only). 
+ +| action | subactions | +|--------|-----------| +| **system** (18) | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config | +| **health** (4) | check, test_connection, diagnose, setup | +| **array** (13) | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array*, add_disk, remove_disk*, mount_disk, unmount_disk, clear_disk_stats* | +| **disk** (6) | shares, disks, disk_details, log_files, logs, flash_backup* | +| **docker** (7) | list, details, start, stop, restart, networks, network_details | +| **vm** (9) | list, details, start, stop, pause, resume, force_stop*, reboot, reset* | +| **notification** (12) | overview, list, create, archive, mark_unread, recalculate, archive_all, archive_many, unarchive_many, unarchive_all, delete*, delete_archived* | +| **key** (7) | list, get, create, update, delete*, add_role, remove_role | +| **plugin** (3) | list, add, remove* | +| **rclone** (4) | list_remotes, config_form, create_remote, delete_remote* | +| **setting** (2) | update, configure_ups* | +| **customization** (5) | theme, public_theme, is_initial_setup, sso_enabled, set_theme | +| **oidc** (5) | providers, provider, configuration, public_providers, validate_session | +| **user** (1) | me | +| **live** (11) | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, notification_feed, log_tail, owner, server_status | + +`*` = destructive, requires `confirm=True` ### Destructive Actions (require `confirm=True`) -- **array**: remove_disk, clear_disk_stats +- **array**: stop_array, remove_disk, clear_disk_stats - **vm**: force_stop, reset -- **notifications**: delete, delete_archived +- **notification**: delete, delete_archived - **rclone**: delete_remote -- **keys**: delete -- **storage**: flash_backup -- **settings**: configure_ups -- **plugins**: remove +- 
**key**: delete +- **disk**: flash_backup +- **setting**: configure_ups +- **plugin**: remove ### Environment Variable Hierarchy The server loads environment variables from multiple locations in order: 1. `~/.unraid-mcp/.env` (primary — canonical credentials dir, all runtimes) 2. `~/.unraid-mcp/.env.local` (local overrides, only used if primary is absent) -3. `/app/.env.local` (Docker container mount) -4. `../.env.local` (project root local overrides) -5. `../.env` (project root fallback) -6. `unraid_mcp/.env` (last resort) - -### Transport Configuration -- **streamable-http** (recommended): HTTP-based transport on `/mcp` endpoint -- **sse** (deprecated): Server-Sent Events transport -- **stdio**: Standard input/output for direct integration +3. `../.env.local` (project root local overrides) +4. `../.env` (project root fallback) +5. `unraid_mcp/.env` (last resort) ### Error Handling Strategy - GraphQL errors are converted to ToolError with descriptive messages @@ -131,6 +141,14 @@ The server loads environment variables from multiple locations in order: - Network errors are caught and wrapped with connection context - All errors are logged with full context for debugging +### Middleware Chain +`server.py` wraps all tools in a 5-layer stack (order matters — outermost first): +1. **LoggingMiddleware** — logs every `tools/call` and `resources/read` with duration +2. **ErrorHandlingMiddleware** — converts unhandled exceptions to proper MCP errors +3. **SlidingWindowRateLimitingMiddleware** — 540 req/min sliding window +4. **ResponseLimitingMiddleware** — truncates responses > 512 KB with a clear suffix +5. 
**ResponseCachingMiddleware** — caching disabled entirely for `unraid` tool (mutations and reads share one tool name, so no per-subaction exclusion is possible) + ### Performance Considerations - Increased timeouts for disk operations (90s read timeout) - Selective queries to avoid GraphQL type overflow issues @@ -140,10 +158,10 @@ The server loads environment variables from multiple locations in order: ## Critical Gotchas ### Mutation Handler Ordering -**Mutation handlers MUST return before the `QUERIES[action]` lookup.** Mutations are not in the `QUERIES` dict — reaching that line for a mutation action causes a `KeyError`. Always add early-return `if action == "mutation_name": ... return` blocks BEFORE the `QUERIES` lookup. +**Mutation handlers MUST return before the domain query dict lookup.** Mutations are not in the domain `_*_QUERIES` dicts (e.g., `_DOCKER_QUERIES`, `_ARRAY_QUERIES`) — reaching that line for a mutation subaction causes a `KeyError`. Always add early-return `if subaction == "mutation_name": ... return` blocks BEFORE the queries lookup. 
### Test Patching -- Patch at the **tool module level**: `unraid_mcp.tools.info.make_graphql_request` (not core) +- Patch at the **tool module level**: `unraid_mcp.tools.unraid.make_graphql_request` (not core) - `conftest.py`'s `mock_graphql_request` patches the core module — wrong for tool-level tests - Use `conftest.py`'s `make_tool_fn()` helper or local `_make_tool()` pattern @@ -155,7 +173,9 @@ tests/ ├── http_layer/ # httpx-level request/response tests (respx) ├── integration/ # WebSocket subscription lifecycle tests (slow) ├── safety/ # Destructive action guard tests -└── schema/ # GraphQL query validation (99 tests, all passing) +├── schema/ # GraphQL query validation (119 tests) +├── contract/ # Response shape contract tests +└── property/ # Input validation property-based tests ``` ### Running Targeted Tests @@ -169,17 +189,22 @@ uv run pytest -x # Fail fast on first error ### Scripts ```bash -# HTTP smoke-test against a live server (11 tools, all non-destructive actions) +# HTTP smoke-test against a live server (non-destructive actions, all domains) ./tests/mcporter/test-actions.sh [MCP_URL] # default: http://localhost:6970/mcp # stdio smoke-test, no running server needed (good for CI) ./tests/mcporter/test-tools.sh [--parallel] [--timeout-ms N] [--verbose] + +# Destructive action smoke-test (confirms guard blocks without confirm=True) +./tests/mcporter/test-destructive.sh [MCP_URL] ``` See `tests/mcporter/README.md` for transport differences and `docs/DESTRUCTIVE_ACTIONS.md` for exact destructive-action test commands. ### API Reference Docs - `docs/UNRAID_API_COMPLETE_REFERENCE.md` — Full GraphQL schema reference - `docs/UNRAID_API_OPERATIONS.md` — All supported operations with examples +- `docs/MARKETPLACE.md` — Plugin marketplace listing and publishing guide +- `docs/PUBLISHING.md` — Step-by-step instructions for publishing to Claude plugin registry Use these when adding new queries/mutations. 
@@ -189,10 +214,11 @@ When bumping the version, **always update both files** — they must stay in syn - `.claude-plugin/plugin.json` → `"version": "X.Y.Z"` ### Credential Storage (`~/.unraid-mcp/.env`) -All runtimes (plugin, direct, Docker) load credentials from `~/.unraid-mcp/.env`. -- **Plugin/direct:** `unraid_health action=setup` writes this file automatically via elicitation, +All runtimes (plugin, direct `uv run`) load credentials from `~/.unraid-mcp/.env`. +- **Plugin/direct:** `unraid action=health subaction=setup` writes this file automatically via elicitation, + **Safe to re-run**: always prompts for confirmation before overwriting existing credentials, + whether the connection is working or not (failed probe may be a transient outage, not bad creds). or manual: `mkdir -p ~/.unraid-mcp && cp .env.example ~/.unraid-mcp/.env` then edit. -- **Docker:** `docker-compose.yml` loads it via `env_file` before container start. - **No symlinks needed.** Version bumps do not affect this path. - **Permissions:** dir=700, file=600 (set automatically by elicitation; set manually if using `cp`: `chmod 700 ~/.unraid-mcp && chmod 600 ~/.unraid-mcp/.env`). diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index f8751d2..0000000 --- a/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -# Use an official Python runtime as a parent image -FROM python:3.12-slim - -# Set the working directory in the container -WORKDIR /app - -# Install uv (pinned tag to avoid mutable latest) -COPY --from=ghcr.io/astral-sh/uv:0.9.25 /uv /uvx /usr/local/bin/ - -# Create non-root user with home directory and give ownership of /app -RUN groupadd --gid 1000 appuser && \ - useradd --uid 1000 --gid 1000 --create-home --shell /bin/false appuser && \ - chown appuser:appuser /app - -# Copy dependency files (owned by appuser via --chown) -COPY --chown=appuser:appuser pyproject.toml . -COPY --chown=appuser:appuser uv.lock . -COPY --chown=appuser:appuser README.md . -COPY --chown=appuser:appuser LICENSE . 
- -# Copy the source code -COPY --chown=appuser:appuser unraid_mcp/ ./unraid_mcp/ - -# Switch to non-root user before installing dependencies -USER appuser - -# Install dependencies and the package -RUN uv sync --frozen - -# Make port UNRAID_MCP_PORT available to the world outside this container -# Defaulting to 6970, but can be overridden by environment variable -EXPOSE 6970 - -# Define environment variables (defaults, can be overridden at runtime) -ENV UNRAID_MCP_PORT=6970 -ENV UNRAID_MCP_HOST="0.0.0.0" -ENV UNRAID_MCP_TRANSPORT="streamable-http" -ENV UNRAID_API_URL="" -ENV UNRAID_API_KEY="" -ENV UNRAID_VERIFY_SSL="true" -ENV UNRAID_MCP_LOG_LEVEL="INFO" - -# Health check -HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ - CMD ["python", "-c", "import os, urllib.request; port = os.getenv('UNRAID_MCP_PORT', '6970'); urllib.request.urlopen(f'http://localhost:{port}/mcp')"] - -# Run unraid-mcp-server when the container launches -CMD ["uv", "run", "unraid-mcp-server"] diff --git a/README.md b/README.md index a02e411..d1df295 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,19 @@ # 🚀 Unraid MCP Server [![Python Version](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/) -[![FastMCP](https://img.shields.io/badge/FastMCP-2.11.2+-green.svg)](https://github.com/jlowin/fastmcp) +[![FastMCP](https://img.shields.io/badge/FastMCP-3.x-green.svg)](https://github.com/jlowin/fastmcp) [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) **A powerful MCP (Model Context Protocol) server that provides comprehensive tools to interact with an Unraid server's GraphQL API.** ## ✨ Features -- 🔧 **11 Tools, ~104 Actions**: Complete Unraid management through MCP protocol -- 🏗️ **Modular Architecture**: Clean, maintainable, and extensible codebase +- 🔧 **1 primary tool + 2 diagnostic tools, 107 subactions**: Complete Unraid management through a consolidated MCP tool +- 🏗️ **Modular Architecture**: Clean, 
maintainable, and extensible codebase - ⚡ **High Performance**: Async/concurrent operations with optimized timeouts -- 🔄 **Real-time Data**: WebSocket subscriptions for live log streaming +- 🔄 **Real-time Data**: WebSocket subscriptions for live metrics, logs, array state, and more - 📊 **Health Monitoring**: Comprehensive system diagnostics and status -- 🐳 **Docker Ready**: Full containerization support with Docker Compose -- 🔒 **Secure**: Proper SSL/TLS configuration and API key management +- 🔒 **Secure**: Network-layer isolation - 📝 **Rich Logging**: Structured logging with rotation and multiple levels --- @@ -25,8 +24,8 @@ - [Quick Start](#-quick-start) - [Installation](#-installation) - [Configuration](#-configuration) +- [Authentication](#-authentication) - [Available Tools & Resources](#-available-tools--resources) -- [Custom Slash Commands](#-custom-slash-commands) - [Development](#-development) - [Architecture](#-architecture) - [Troubleshooting](#-troubleshooting) @@ -46,8 +45,7 @@ ``` This provides instant access to Unraid monitoring and management through Claude Code with: -- **11 MCP tools** exposing **~104 actions** via the consolidated action pattern -- **10 slash commands** for quick CLI-style access (`commands/`) +- **1 primary MCP tool** (`unraid`) exposing **107 subactions** via `action` + `subaction` routing, plus `diagnose_subscriptions` and `test_subscription_query` diagnostic tools - Real-time system metrics and health monitoring - Docker container and VM lifecycle management - Disk health monitoring and storage management @@ -57,11 +55,11 @@ This provides instant access to Unraid monitoring and management through Claude ### ⚙️ Credential Setup Credentials are stored in `~/.unraid-mcp/.env` — one location that works for the -Claude Code plugin, direct `uv run` invocations, and Docker. +Claude Code plugin and direct `uv run` invocations. 
**Option 1 — Interactive (Claude Code plugin, elicitation-supported clients):** ``` -unraid_health action=setup +unraid(action="health", subaction="setup") ``` The server prompts for your API URL and key, writes `~/.unraid-mcp/.env` automatically (created with mode 700/600), and activates credentials without restart. @@ -75,9 +73,6 @@ cp .env.example ~/.unraid-mcp/.env && chmod 600 ~/.unraid-mcp/.env # UNRAID_API_KEY=your-key-from-unraid-settings ``` -**Docker:** `~/.unraid-mcp/.env` is loaded via `env_file` in `docker-compose.yml` — -same file, no duplication needed. - > **Finding your API key:** Unraid → Settings → Management Access → API Keys --- @@ -85,8 +80,7 @@ same file, no duplication needed. ## 🚀 Quick Start ### Prerequisites -- Docker and Docker Compose (recommended) -- OR Python 3.12+ with [uv](https://github.com/astral-sh/uv) for development +- Python 3.12+ with [uv](https://github.com/astral-sh/uv) for development - Unraid server with GraphQL API enabled ### 1. Clone Repository @@ -97,20 +91,16 @@ cd unraid-mcp ### 2. Configure Environment ```bash +# Canonical credential location (all runtimes) +mkdir -p ~/.unraid-mcp && chmod 700 ~/.unraid-mcp +cp .env.example ~/.unraid-mcp/.env && chmod 600 ~/.unraid-mcp/.env +# Edit ~/.unraid-mcp/.env with your values + +# For local development only cp .env.example .env -# Edit .env with your Unraid API details ``` -### 3. Deploy with Docker (Recommended) -```bash -# Start with Docker Compose -docker compose up -d - -# View logs -docker compose logs -f unraid-mcp -``` - -### OR 3. Run for Development +### 3. 
Run for Development ```bash # Install dependencies uv sync @@ -130,15 +120,13 @@ unraid-mcp/ # ${CLAUDE_PLUGIN_ROOT} ├── .claude-plugin/ │ ├── marketplace.json # Marketplace catalog │ └── plugin.json # Plugin manifest -├── commands/ # 10 custom slash commands ├── unraid_mcp/ # MCP server Python package ├── skills/unraid/ # Skill and documentation ├── pyproject.toml # Dependencies and entry points └── scripts/ # Validation and helper scripts ``` -- **MCP Server**: 11 tools with ~104 actions via GraphQL API -- **Slash Commands**: 10 commands in `commands/` for quick CLI-style access +- **MCP Server**: 3 tools — `unraid` (107 subactions) + `diagnose_subscriptions` + `test_subscription_query` - **Skill**: `/unraid` skill for monitoring and queries - **Entry Point**: `unraid-mcp-server` defined in pyproject.toml @@ -146,38 +134,6 @@ unraid-mcp/ # ${CLAUDE_PLUGIN_ROOT} ## 📦 Installation -### 🐳 Docker Deployment (Recommended) - -The easiest way to run the Unraid MCP Server is with Docker: - -```bash -# Clone repository -git clone https://github.com/jmagar/unraid-mcp -cd unraid-mcp - -# Set required environment variables -export UNRAID_API_URL="http://your-unraid-server/graphql" -export UNRAID_API_KEY="your_api_key_here" - -# Deploy with Docker Compose -docker compose up -d - -# View logs -docker compose logs -f unraid-mcp -``` - -#### Manual Docker Build -```bash -# Build and run manually -docker build -t unraid-mcp-server . -docker run -d --name unraid-mcp \ - --restart unless-stopped \ - -p 6970:6970 \ - -e UNRAID_API_URL="http://your-unraid-server/graphql" \ - -e UNRAID_API_KEY="your_api_key_here" \ - unraid-mcp-server -``` - ### 🔧 Development Installation For development and testing: @@ -207,7 +163,7 @@ uv run unraid-mcp-server ### Environment Variables -Create `.env` file in the project root: +Credentials and settings go in `~/.unraid-mcp/.env` (the canonical location loaded by all runtimes — plugin and direct `uv run`). 
See the [Credential Setup](#%EF%B8%8F-credential-setup) section above for how to create it. ```bash # Core API Configuration (Required) @@ -215,7 +171,7 @@ UNRAID_API_URL=https://your-unraid-server-url/graphql UNRAID_API_KEY=your_unraid_api_key # MCP Server Settings -UNRAID_MCP_TRANSPORT=streamable-http # streamable-http (recommended), sse (deprecated), stdio +UNRAID_MCP_TRANSPORT=stdio # stdio (default) UNRAID_MCP_HOST=0.0.0.0 UNRAID_MCP_PORT=6970 @@ -223,105 +179,82 @@ UNRAID_MCP_PORT=6970 UNRAID_MCP_LOG_LEVEL=INFO # DEBUG, INFO, WARNING, ERROR UNRAID_MCP_LOG_FILE=unraid-mcp.log -# SSL/TLS Configuration +# SSL/TLS Configuration UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle -# Optional: Log Stream Configuration -# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog # Path for log streaming resource +# Subscription Configuration +UNRAID_AUTO_START_SUBSCRIPTIONS=true # Auto-start WebSocket subscriptions on startup (default: true) +UNRAID_MAX_RECONNECT_ATTEMPTS=10 # Max WebSocket reconnection attempts (default: 10) + +# Optional: Auto-start log file subscription path +# Defaults to /var/log/syslog if it exists and this is unset +# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog + +# Optional: Credentials directory override (default: ~/.unraid-mcp/) +# Useful for containers or non-standard home directory layouts +# UNRAID_CREDENTIALS_DIR=/custom/path/to/credentials ``` -### Transport Options - -| Transport | Description | Use Case | -|-----------|-------------|----------| -| `streamable-http` | HTTP-based (recommended) | Most compatible, best performance | -| `sse` | Server-Sent Events (deprecated) | Legacy support only | -| `stdio` | Standard I/O | Direct integration scenarios | - --- ## 🛠️ Available Tools & Resources -Each tool uses a consolidated `action` parameter to expose multiple operations, reducing context window usage. Destructive actions require `confirm=True`. 
+The single `unraid` tool uses `action` (domain) + `subaction` (operation) routing to expose all operations via one MCP tool, minimizing context window usage. Destructive actions require `confirm=True`. -### Tool Categories (11 Tools, ~104 Actions) +### Primary Tool: 15 Domains, 107 Subactions -| Tool | Actions | Description | -|------|---------|-------------| -| **`unraid_info`** | 21 | overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config, update_server, update_ssh | -| **`unraid_array`** | 5 | parity_start, parity_pause, parity_resume, parity_cancel, parity_status | -| **`unraid_storage`** | 7 | shares, disks, disk_details, unassigned, log_files, logs, flash_backup | -| **`unraid_docker`** | 26 | list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates, create_folder, set_folder_children, delete_entries, move_to_folder, move_to_position, rename_folder, create_folder_with_items, update_view_prefs, sync_templates, reset_template_mappings, refresh_digests | -| **`unraid_vm`** | 9 | list, details, start, stop, pause, resume, force_stop, reboot, reset | -| **`unraid_notifications`** | 14 | overview, list, warnings, create, create_unique, archive, archive_many, unread, unarchive_many, unarchive_all, recalculate, delete, delete_archived, archive_all | -| **`unraid_rclone`** | 4 | list_remotes, config_form, create_remote, delete_remote | -| **`unraid_users`** | 1 | me | -| **`unraid_keys`** | 5 | list, get, create, update, delete | -| **`unraid_health`** | 3 | check, test_connection, diagnose | -| **`unraid_settings`** | 9 | update, update_temperature, update_time, configure_ups, update_api, connect_sign_in, connect_sign_out, setup_remote_access, enable_dynamic_remote_access | +Call pattern: `unraid(action="<domain>", subaction="<operation>")` -### MCP Resources (Real-time Data)
-- `unraid://logs/stream` - Live log streaming from `/var/log/syslog` with WebSocket subscriptions +| action= | Subactions | Description | +|---------|-----------|-------------| +| **`system`** | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config | Server info, metrics, network, UPS (18 subactions) | +| **`health`** | check, test_connection, diagnose, setup | Health checks, connection test, diagnostics, interactive setup (4 subactions) | +| **`array`** | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array, add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats | Parity checks, array state, disk operations (13 subactions) | +| **`disk`** | shares, disks, disk_details, log_files, logs, flash_backup | Shares, physical disks, log files (6 subactions) | +| **`docker`** | list, details, start, stop, restart, networks, network_details | Container lifecycle and network inspection (7 subactions) | +| **`vm`** | list, details, start, stop, pause, resume, force_stop, reboot, reset | Virtual machine lifecycle (9 subactions) | +| **`notification`** | overview, list, create, archive, mark_unread, delete, delete_archived, archive_all, archive_many, unarchive_many, unarchive_all, recalculate | System notifications CRUD (12 subactions) | +| **`key`** | list, get, create, update, delete, add_role, remove_role | API key management (7 subactions) | +| **`plugin`** | list, add, remove | Plugin management (3 subactions) | +| **`rclone`** | list_remotes, config_form, create_remote, delete_remote | Cloud storage remote management (4 subactions) | +| **`setting`** | update, configure_ups | System settings and UPS config (2 subactions) | +| **`customization`** | theme, public_theme, is_initial_setup, sso_enabled, set_theme | Theme and UI customization (5 subactions) | +| **`oidc`** | providers, 
provider, configuration, public_providers, validate_session | OIDC/SSO provider management (5 subactions) | +| **`user`** | me | Current authenticated user (1 subaction) | +| **`live`** | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, owner, server_status, log_tail, notification_feed | Real-time WebSocket subscription snapshots (11 subactions) | -> **Note**: MCP Resources provide real-time data streams that can be accessed via MCP clients. The log stream resource automatically connects to your Unraid system logs and provides live updates. +### Destructive Actions (require `confirm=True`) +- **array**: `stop_array`, `remove_disk`, `clear_disk_stats` +- **vm**: `force_stop`, `reset` +- **notification**: `delete`, `delete_archived` +- **rclone**: `delete_remote` +- **key**: `delete` +- **disk**: `flash_backup` +- **setting**: `configure_ups` +- **plugin**: `remove` ---- +### MCP Resources (Real-time Cached Data) -## 💬 Custom Slash Commands +The server exposes two classes of MCP resources backed by persistent WebSocket connections: -The project includes **10 custom slash commands** in `commands/` for quick access to Unraid operations: +**`unraid://live/*` — 9 snapshot resources** (auto-started, always-cached): +- `unraid://live/cpu` — CPU utilization +- `unraid://live/memory` — Memory usage +- `unraid://live/cpu_telemetry` — Detailed CPU telemetry +- `unraid://live/array_state` — Array state changes +- `unraid://live/parity_progress` — Parity check progress +- `unraid://live/ups_status` — UPS status +- `unraid://live/notifications_overview` — Notification counts +- `unraid://live/owner` — Owner info changes +- `unraid://live/server_status` — Server status changes -### Available Commands +**`unraid://logs/stream`** — Live log file tail (path controlled by `UNRAID_AUTOSTART_LOG_PATH`) -| Command | Actions | Quick Access | -|---------|---------|--------------| -| `/info` | 21 | System information, metrics, configuration | -| 
`/array` | 5 | Parity check management | -| `/storage` | 7 | Shares, disks, logs | -| `/docker` | 26 | Container management and monitoring | -| `/vm` | 9 | Virtual machine lifecycle | -| `/notifications` | 14 | Alert management | -| `/rclone` | 4 | Cloud storage remotes | -| `/users` | 1 | Current user query | -| `/keys` | 5 | API key management | -| `/health` | 3 | System health checks | +> **Note**: Resources return cached data from persistent WebSocket subscriptions. A `{"status": "connecting"}` placeholder is returned while the subscription initializes — retry in a moment. +> +> **`log_tail`** is accessible as a tool subaction (`unraid(action="live", subaction="log_tail", path="/var/log/syslog")`) and requires a `path`; **`notification_feed`** is also available as a tool subaction but uses a transient one-shot subscription and accepts optional parameters. Neither is registered as an MCP resource. -### Example Usage - -```bash -# System monitoring -/info overview -/health check -/storage shares - -# Container management -/docker list -/docker start plex -/docker logs nginx - -# VM operations -/vm list -/vm start windows-10 - -# Notifications -/notifications warnings -/notifications archive_all - -# User management -/users list -/keys create "Automation Key" "For CI/CD" -``` - -### Command Features - -Each slash command provides: -- **Comprehensive documentation** of all available actions -- **Argument hints** for required parameters -- **Safety warnings** for destructive operations (⚠️) -- **Usage examples** for common scenarios -- **Action categorization** (Query, Lifecycle, Management, Destructive) - -Run any command without arguments to see full documentation, or type `/help` to list all available commands. +> **Security note**: The `disk/logs` and `live/log_tail` subactions allow reading files under `/var/log/` and `/boot/logs/` on the Unraid server. Authenticated MCP clients can stream any log file within these directories. 
--- @@ -333,32 +266,41 @@ Run any command without arguments to see full documentation, or type `/help` to unraid-mcp/ ├── unraid_mcp/ # Main package │ ├── main.py # Entry point +│ ├── server.py # FastMCP server setup +│ ├── version.py # Version management (importlib.metadata) │ ├── config/ # Configuration management │ │ ├── settings.py # Environment & settings │ │ └── logging.py # Logging setup -│ ├── core/ # Core infrastructure +│ ├── core/ # Core infrastructure │ │ ├── client.py # GraphQL client │ │ ├── exceptions.py # Custom exceptions -│ │ └── types.py # Shared data types +│ │ ├── guards.py # Destructive action guards +│ │ ├── setup.py # Interactive credential setup +│ │ ├── types.py # Shared data types +│ │ └── utils.py # Utility functions │ ├── subscriptions/ # Real-time subscriptions -│ │ ├── manager.py # WebSocket management -│ │ ├── resources.py # MCP resources -│ │ └── diagnostics.py # Diagnostic tools -│ ├── tools/ # MCP tool categories (11 tools, ~104 actions) -│ │ ├── info.py # System information (21 actions) -│ │ ├── array.py # Parity checks (5 actions) -│ │ ├── storage.py # Storage & monitoring (7 actions) -│ │ ├── docker.py # Container management (26 actions) -│ │ ├── virtualization.py # VM management (9 actions) -│ │ ├── notifications.py # Notification management (14 actions) -│ │ ├── rclone.py # Cloud storage (4 actions) -│ │ ├── users.py # Current user query (1 action) -│ │ ├── keys.py # API key management (5 actions) -│ │ ├── settings.py # Server settings (9 actions) -│ │ └── health.py # Health checks (3 actions) -│ └── server.py # FastMCP server setup -├── logs/ # Log files (auto-created) -└── docker-compose.yml # Docker Compose deployment +│ │ ├── manager.py # Persistent WebSocket manager +│ │ ├── resources.py # MCP resources (unraid://live/*) +│ │ ├── snapshot.py # Transient subscribe_once helpers +│ │ ├── queries.py # Subscription query constants +│ │ ├── diagnostics.py # Diagnostic tools +│ │ └── utils.py # Subscription utility functions +│ 
└── tools/ # Consolidated tools (unraid: 107 subactions + 2 diagnostic tools) +│ └── unraid.py # All 15 domains in one file +├── tests/ # Test suite +│ ├── conftest.py # Shared fixtures +│ ├── test_*.py # Unit tests (per domain) +│ ├── http_layer/ # httpx-level request tests +│ ├── integration/ # WebSocket lifecycle tests +│ ├── safety/ # Destructive action guard tests +│ └── schema/ # GraphQL query validation +├── docs/ # Documentation & API references +├── scripts/ # Build and utility scripts +├── skills/unraid/ # Claude skill assets +├── .claude-plugin/ # Plugin manifest & marketplace config +├── .env.example # Environment template +├── pyproject.toml # Project config & dependencies +└── logs/ # Log files (auto-created, gitignored) ``` ### Code Quality Commands @@ -376,17 +318,14 @@ uv run pytest ### Integration Smoke-Tests (mcporter) -Live integration tests that exercise all non-destructive actions via [mcporter](https://github.com/mcporter/mcporter). Two scripts cover two transport modes: +Live integration tests that exercise all non-destructive actions via [mcporter](https://github.com/mcporter/mcporter). ```bash # stdio — no running server needed (good for CI) ./tests/mcporter/test-tools.sh [--parallel] [--timeout-ms N] [--verbose] - -# HTTP — connects to a live server (most up-to-date coverage) -./tests/mcporter/test-actions.sh [MCP_URL] # default: http://localhost:6970/mcp ``` -Destructive actions are always skipped in both scripts. For safe testing strategies and exact mcporter commands per destructive action, see [`docs/DESTRUCTIVE_ACTIONS.md`](docs/DESTRUCTIVE_ACTIONS.md). +Destructive actions are always skipped. For safe testing strategies and exact mcporter commands per destructive action, see [`docs/DESTRUCTIVE_ACTIONS.md`](docs/DESTRUCTIVE_ACTIONS.md). 
### API Schema Docs Automation ```bash @@ -409,6 +348,16 @@ uv run unraid-mcp-server # Or run via module directly uv run -m unraid_mcp.main + +# Run via named config files +fastmcp run fastmcp.stdio.json # stdio transport +``` + +### Ad-hoc Tool Testing (fastmcp CLI) +```bash +# Call without a running server (stdio config) +fastmcp list fastmcp.stdio.json +fastmcp call fastmcp.stdio.json unraid action=health subaction=check ``` --- diff --git a/commands/array.md b/commands/array.md deleted file mode 100644 index 1b294e9..0000000 --- a/commands/array.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -description: Manage Unraid array parity checks -argument-hint: [action] [correct=true/false] ---- - -Execute the `unraid_array` MCP tool with action: `$1` - -## Available Actions (5) - -**Parity Check Operations:** -- `parity_start` - Start parity check/sync (optional: correct=true to fix errors) -- `parity_pause` - Pause running parity operation -- `parity_resume` - Resume paused parity operation -- `parity_cancel` - Cancel running parity operation -- `parity_status` - Get current parity check status - -## Example Usage - -``` -/array parity_start -/array parity_start correct=true -/array parity_pause -/array parity_resume -/array parity_cancel -/array parity_status -``` - -**Note:** Use `correct=true` with `parity_start` to automatically fix any parity errors found during the check. - -Use the tool to execute the requested parity operation and report the results. 
diff --git a/commands/docker.md b/commands/docker.md deleted file mode 100644 index 95b753d..0000000 --- a/commands/docker.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -description: Manage Docker containers on Unraid -argument-hint: [action] [additional-args] ---- - -Execute the `unraid_docker` MCP tool with action: `$1` - -## Available Actions (15) - -**Query Operations:** -- `list` - List all Docker containers with status -- `details` - Get detailed info for a container (requires container identifier) -- `logs` - Get container logs (requires container identifier) -- `check_updates` - Check for available container updates -- `port_conflicts` - Identify port conflicts -- `networks` - List Docker networks -- `network_details` - Get network details (requires network identifier) - -**Container Lifecycle:** -- `start` - Start a stopped container (requires container identifier) -- `stop` - Stop a running container (requires container identifier) -- `restart` - Restart a container (requires container identifier) -- `pause` - Pause a running container (requires container identifier) -- `unpause` - Unpause a paused container (requires container identifier) - -**Updates & Management:** -- `update` - Update a specific container (requires container identifier) -- `update_all` - Update all containers with available updates - -**⚠️ Destructive:** -- `remove` - Permanently delete a container (requires container identifier + confirmation) - -## Example Usage - -``` -/unraid-docker list -/unraid-docker details plex -/unraid-docker logs plex -/unraid-docker start nginx -/unraid-docker restart sonarr -/unraid-docker check_updates -/unraid-docker update plex -/unraid-docker port_conflicts -``` - -**Container Identification:** Use container name, ID, or partial match (fuzzy search supported) - -Use the tool to execute the requested Docker operation and report the results. 
diff --git a/commands/health.md b/commands/health.md deleted file mode 100644 index 526088a..0000000 --- a/commands/health.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Check Unraid system health and connectivity -argument-hint: [action] ---- - -Execute the `unraid_health` MCP tool with action: `$1` - -## Available Actions (3) - -**Health Monitoring:** -- `check` - Comprehensive health check of all system components -- `test_connection` - Test basic API connectivity -- `diagnose` - Detailed diagnostic information for troubleshooting - -## What Each Action Checks - -### `check` - System Health -- API connectivity and response time -- Array status and disk health -- Running services status -- Docker container health -- VM status -- System resources (CPU, RAM, disk I/O) -- Network connectivity -- UPS status (if configured) - -Returns: Overall health status (`HEALTHY`, `WARNING`, `CRITICAL`) with component details - -### `test_connection` - Connectivity -- GraphQL endpoint availability -- Authentication validity -- Basic query execution -- Network latency - -Returns: Connection status and latency metrics - -### `diagnose` - Diagnostic Details -- Full system configuration -- Resource utilization trends -- Error logs and warnings -- Component-level diagnostics -- Troubleshooting recommendations - -Returns: Detailed diagnostic report - -## Example Usage - -``` -/unraid-health check -/unraid-health test_connection -/unraid-health diagnose -``` - -**Use Cases:** -- `check` - Quick health status (monitoring dashboards) -- `test_connection` - Verify API access (troubleshooting) -- `diagnose` - Deep dive debugging (issue resolution) - -Use the tool to execute the requested health check and present results with clear severity indicators. 
diff --git a/commands/info.md b/commands/info.md deleted file mode 100644 index 6fd79f3..0000000 --- a/commands/info.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -description: Query Unraid server information and configuration -argument-hint: [action] [additional-args] ---- - -Execute the `unraid_info` MCP tool with action: `$1` - -## Available Actions (19) - -**System Overview:** -- `overview` - Complete system summary with all key metrics -- `server` - Server details (hostname, version, uptime) -- `servers` - List all known Unraid servers - -**Array & Storage:** -- `array` - Array status, disks, and health - -**Network & Registration:** -- `network` - Network configuration and interfaces -- `registration` - Registration status and license info -- `connect` - Connect service configuration -- `online` - Online status check - -**Configuration:** -- `config` - System configuration settings -- `settings` - User settings and preferences -- `variables` - Environment variables -- `display` - Display settings - -**Services & Monitoring:** -- `services` - Running services status -- `metrics` - System metrics (CPU, RAM, disk I/O) -- `ups_devices` - List all UPS devices -- `ups_device` - Get specific UPS device details (requires device_id) -- `ups_config` - UPS configuration - -**Ownership:** -- `owner` - Server owner information -- `flash` - USB flash drive details - -## Example Usage - -``` -/unraid-info overview -/unraid-info array -/unraid-info metrics -/unraid-info ups_device [device-id] -``` - -Use the tool to retrieve the requested information and present it in a clear, formatted manner. 
diff --git a/commands/keys.md b/commands/keys.md deleted file mode 100644 index 56bf8f8..0000000 --- a/commands/keys.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Manage Unraid API keys for authentication -argument-hint: [action] [key-id] ---- - -Execute the `unraid_keys` MCP tool with action: `$1` - -## Available Actions (5) - -**Query Operations:** -- `list` - List all API keys with metadata -- `get` - Get details for a specific API key (requires key_id) - -**Management Operations:** -- `create` - Create a new API key (requires name, optional description and expiry) -- `update` - Update an existing API key (requires key_id, name, description) - -**⚠️ Destructive:** -- `delete` - Permanently revoke an API key (requires key_id + confirmation) - -## Example Usage - -``` -/unraid-keys list -/unraid-keys get [key-id] -/unraid-keys create "MCP Server Key" "Key for unraid-mcp integration" -/unraid-keys update [key-id] "Updated Name" "Updated description" -``` - -**Key Format:** PrefixedID (`hex64:suffix`) - -**IMPORTANT:** -- Deleted keys are immediately revoked and cannot be recovered -- Store new keys securely - they're only shown once during creation -- Set expiry dates for keys used in automation - -Use the tool to execute the requested API key operation and report the results. 
diff --git a/commands/notifications.md b/commands/notifications.md deleted file mode 100644 index 84716c4..0000000 --- a/commands/notifications.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: Manage Unraid system notifications and alerts -argument-hint: [action] [additional-args] ---- - -Execute the `unraid_notifications` MCP tool with action: `$1` - -## Available Actions (9) - -**Query Operations:** -- `overview` - Summary of notification counts by category -- `list` - List all notifications with details -- `warnings` - List only warning/error notifications -- `unread` - List unread notifications only - -**Management Operations:** -- `create` - Create a new notification (requires title, message, severity) -- `archive` - Archive a specific notification (requires notification_id) -- `archive_all` - Archive all current notifications - -**⚠️ Destructive Operations:** -- `delete` - Permanently delete a notification (requires notification_id + confirmation) -- `delete_archived` - Permanently delete all archived notifications (requires confirmation) - -## Example Usage - -``` -/unraid-notifications overview -/unraid-notifications list -/unraid-notifications warnings -/unraid-notifications unread -/unraid-notifications create "Test Alert" "This is a test" normal -/unraid-notifications archive [notification-id] -/unraid-notifications archive_all -``` - -**Severity Levels:** `normal`, `warning`, `alert`, `critical` - -**IMPORTANT:** Delete operations are permanent and cannot be undone. - -Use the tool to execute the requested notification operation and present results clearly. 
diff --git a/commands/rclone.md b/commands/rclone.md deleted file mode 100644 index 68124e4..0000000 --- a/commands/rclone.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -description: Manage Rclone cloud storage remotes on Unraid -argument-hint: [action] [remote-name] ---- - -Execute the `unraid_rclone` MCP tool with action: `$1` - -## Available Actions (4) - -**Query Operations:** -- `list_remotes` - List all configured Rclone remotes -- `config_form` - Get configuration form for a remote type (requires remote_type) - -**Management Operations:** -- `create_remote` - Create a new Rclone remote (requires remote_name, remote_type, config) - -**⚠️ Destructive:** -- `delete_remote` - Permanently delete a remote (requires remote_name + confirmation) - -## Example Usage - -``` -/unraid-rclone list_remotes -/unraid-rclone config_form s3 -/unraid-rclone create_remote mybackup s3 {"access_key":"...","secret_key":"..."} -``` - -**Supported Remote Types:** s3, dropbox, google-drive, onedrive, backblaze, ftp, sftp, webdav, etc. - -**IMPORTANT:** Deleting a remote does NOT delete cloud data, only the local configuration. - -Use the tool to execute the requested Rclone operation and report the results. diff --git a/commands/settings.md b/commands/settings.md deleted file mode 100644 index 59316af..0000000 --- a/commands/settings.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: Manage Unraid system settings and configuration -argument-hint: [action] [additional-args] ---- - -Execute the `unraid_settings` MCP tool with action: `$1` - -## Available Actions (9) - -All settings actions are mutations that modify server configuration. - -**General Settings:** -- `update` - Update general system settings (timezone, locale, etc.) 
-- `update_temperature` - Update temperature unit preference (Celsius/Fahrenheit) -- `update_time` - Update NTP and time configuration - -**UPS Configuration:** -- `configure_ups` - Configure UPS settings (requires `confirm=True` — DESTRUCTIVE) - -**API & Connectivity:** -- `update_api` - Update Unraid Connect API settings - -**Unraid Connect (My Servers):** -- `connect_sign_in` - Sign in to Unraid Connect cloud service -- `connect_sign_out` - Sign out of Unraid Connect cloud service - -**Remote Access:** -- `setup_remote_access` - Configure remote access settings (requires `confirm=True` — DESTRUCTIVE) -- `enable_dynamic_remote_access` - Enable/configure dynamic remote access (requires `confirm=True` — DESTRUCTIVE) - -## Example Usage - -``` -/unraid-settings update -/unraid-settings update_temperature -/unraid-settings update_time -/unraid-settings update_api -/unraid-settings connect_sign_in -/unraid-settings connect_sign_out -``` - -**⚠️ Destructive Operations (require `confirm=True`):** -- `configure_ups` - Modifies UPS hardware configuration -- `setup_remote_access` - Changes network access policies -- `enable_dynamic_remote_access` - Changes network access policies - -**IMPORTANT:** Settings changes take effect immediately and may affect server accessibility. - -Use the tool to execute the requested settings operation and report the results. 
diff --git a/commands/storage.md b/commands/storage.md deleted file mode 100644 index 37acb37..0000000 --- a/commands/storage.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Query Unraid storage, shares, and disk information -argument-hint: [action] [additional-args] ---- - -Execute the `unraid_storage` MCP tool with action: `$1` - -## Available Actions (6) - -**Shares & Disks:** -- `shares` - List all user shares with sizes and allocation -- `disks` - List all disks in the array -- `disk_details` - Get detailed info for a specific disk (requires disk identifier) -- `unassigned` - List unassigned devices - -**Logs:** -- `log_files` - List available system log files -- `logs` - Read log file contents (requires log file path) - -## Example Usage - -``` -/unraid-storage shares -/unraid-storage disks -/unraid-storage disk_details disk1 -/unraid-storage unassigned -/unraid-storage log_files -/unraid-storage logs /var/log/syslog -``` - -**Note:** Log file paths must start with `/var/log/`, `/boot/logs/`, or `/mnt/` - -Use the tool to retrieve the requested storage information and present it clearly. diff --git a/commands/users.md b/commands/users.md deleted file mode 100644 index b4a1033..0000000 --- a/commands/users.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: Query current authenticated Unraid user -argument-hint: [action] ---- - -Execute the `unraid_users` MCP tool with action: `$1` - -## Available Actions (1) - -**Query Operation:** -- `me` - Get current authenticated user info (id, name, description, roles) - -## Example Usage - -``` -/users me -``` - -## API Limitation - -⚠️ **Note:** The Unraid GraphQL API does not support user management operations. Only the `me` query is available, which returns information about the currently authenticated user (the API key holder). - -**Not supported:** -- Listing all users -- Getting other user details -- Adding/deleting users -- Cloud/remote access queries - -For user management, use the Unraid web UI. 
- -Use the tool to query the current authenticated user and report the results. diff --git a/commands/vm.md b/commands/vm.md deleted file mode 100644 index 78923e0..0000000 --- a/commands/vm.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: Manage virtual machines on Unraid -argument-hint: [action] [vm-id] ---- - -Execute the `unraid_vm` MCP tool with action: `$1` and vm_id: `$2` - -## Available Actions (9) - -**Query Operations:** -- `list` - List all VMs with status and resource allocation -- `details` - Get detailed info for a VM (requires vm_id) - -**Lifecycle Operations:** -- `start` - Start a stopped VM (requires vm_id) -- `stop` - Gracefully stop a running VM (requires vm_id) -- `pause` - Pause a running VM (requires vm_id) -- `resume` - Resume a paused VM (requires vm_id) -- `reboot` - Gracefully reboot a VM (requires vm_id) - -**⚠️ Destructive Operations:** -- `force_stop` - Forcefully power off VM (like pulling power cord - requires vm_id + confirmation) -- `reset` - Hard reset VM (power cycle without graceful shutdown - requires vm_id + confirmation) - -## Example Usage - -``` -/unraid-vm list -/unraid-vm details windows-10 -/unraid-vm start ubuntu-server -/unraid-vm stop windows-10 -/unraid-vm pause debian-vm -/unraid-vm resume debian-vm -/unraid-vm reboot ubuntu-server -``` - -**VM Identification:** Use VM ID (PrefixedID format: `hex64:suffix`) - -**IMPORTANT:** `force_stop` and `reset` bypass graceful shutdown and may corrupt VM filesystem. Use `stop` instead for safe shutdowns. - -Use the tool to execute the requested VM operation and report the results. diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 98439c6..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,49 +0,0 @@ -services: - unraid-mcp: - build: - context: . 
- dockerfile: Dockerfile - container_name: unraid-mcp - restart: unless-stopped - read_only: true - cap_drop: - - ALL - tmpfs: - - /tmp:noexec,nosuid,size=64m - - /app/logs:noexec,nosuid,size=16m - - /app/.cache/logs:noexec,nosuid,size=8m - ports: - # HostPort:ContainerPort (maps to UNRAID_MCP_PORT inside the container, default 6970) - # Change the host port (left side) if 6970 is already in use on your host - - "${UNRAID_MCP_PORT:-6970}:${UNRAID_MCP_PORT:-6970}" - env_file: - - path: ${HOME}/.unraid-mcp/.env - required: false # Don't fail if file missing; environment: block below takes over - environment: - # Core API Configuration (Required) - # Sourced from ~/.unraid-mcp/.env via env_file above (if present), - # or set these directly here. The :? syntax fails fast if unset. - - UNRAID_API_URL=${UNRAID_API_URL:?UNRAID_API_URL is required} - - UNRAID_API_KEY=${UNRAID_API_KEY:?UNRAID_API_KEY is required} - - # MCP Server Settings - - UNRAID_MCP_PORT=${UNRAID_MCP_PORT:-6970} - - UNRAID_MCP_HOST=${UNRAID_MCP_HOST:-0.0.0.0} - - UNRAID_MCP_TRANSPORT=${UNRAID_MCP_TRANSPORT:-streamable-http} - - # SSL Configuration - - UNRAID_VERIFY_SSL=${UNRAID_VERIFY_SSL:-true} - - # Logging Configuration - - UNRAID_MCP_LOG_LEVEL=${UNRAID_MCP_LOG_LEVEL:-INFO} - - UNRAID_MCP_LOG_FILE=${UNRAID_MCP_LOG_FILE:-unraid-mcp.log} - - # Real-time Subscription Configuration - - UNRAID_AUTO_START_SUBSCRIPTIONS=${UNRAID_AUTO_START_SUBSCRIPTIONS:-true} - - UNRAID_MAX_RECONNECT_ATTEMPTS=${UNRAID_MAX_RECONNECT_ATTEMPTS:-10} - - # Optional: Custom log file path for subscription auto-start diagnostics - - UNRAID_AUTOSTART_LOG_PATH=${UNRAID_AUTOSTART_LOG_PATH} - # Optional: If you want to mount a specific directory for logs (ensure UNRAID_MCP_LOG_FILE points within this mount) - # volumes: - # - ./logs:/app/logs # Example: maps ./logs on host to /app/logs in container diff --git a/docs/DESTRUCTIVE_ACTIONS.md b/docs/DESTRUCTIVE_ACTIONS.md index e31be8d..041d007 100644 --- a/docs/DESTRUCTIVE_ACTIONS.md +++ 
b/docs/DESTRUCTIVE_ACTIONS.md @@ -1,78 +1,52 @@ # Destructive Actions -**Last Updated:** 2026-03-13 -**Total destructive actions:** 15 across 7 tools +**Last Updated:** 2026-03-24 +**Total destructive actions:** 12 across 8 domains (single `unraid` tool) All destructive actions require `confirm=True` at the call site. There is no additional environment variable gate — `confirm` is the sole guard. -> **mcporter commands below** use `$MCP_URL` (default: `http://localhost:6970/mcp`). Run `test-actions.sh` for automated non-destructive coverage; destructive actions are always skipped there and tested manually per the strategies below. +> **mcporter commands below** use stdio transport. Run `test-tools.sh` for automated non-destructive coverage; destructive actions are always skipped there and tested manually per the strategies below. +> +> **Calling convention (v1.0.0+):** All operations use the single `unraid` tool with `action` (domain) + `subaction` (operation). For example: +> `mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid --args '{"action":"docker","subaction":"list"}'` --- -## `unraid_docker` +## `array` -### `remove` — Delete a container permanently +### `stop_array` — Stop the Unraid array + +**Strategy: mock/safety audit only.** +Stopping the array unmounts all shares and can interrupt running containers and VMs accessing array data. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless all containers and VMs are shut down first. + +--- + +### `remove_disk` — Remove a disk from the array ```bash -# 1. Provision a throwaway canary container -docker run -d --name mcp-test-canary alpine sleep 3600 +# Prerequisite: array must already be stopped; use a disk you intend to remove -# 2. 
Discover its MCP-assigned ID -CID=$(mcporter call --http-url "$MCP_URL" --tool unraid_docker \ - --args '{"action":"list"}' --output json \ - | python3 -c "import json,sys; cs=json.load(sys.stdin).get('containers',[]); print(next(c['id'] for c in cs if 'mcp-test-canary' in c.get('name','')))") - -# 3. Remove via MCP -mcporter call --http-url "$MCP_URL" --tool unraid_docker \ - --args "{\"action\":\"remove\",\"container_id\":\"$CID\",\"confirm\":true}" --output json - -# 4. Verify -docker ps -a | grep mcp-test-canary # should return nothing +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"array","subaction":"remove_disk","disk_id":"","confirm":true}' --output json ``` --- -### `update_all` — Pull latest images and restart all containers - -**Strategy: mock/safety audit only.** -No safe live isolation — this hits every running container. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless all containers can tolerate a simultaneous restart. - ---- - -### `delete_entries` — Delete Docker organizer folders/entries +### `clear_disk_stats` — Clear I/O statistics for a disk (irreversible) ```bash -# 1. Create a throwaway organizer folder -# Parameter: folder_name (str); ID is in organizer.views.flatEntries[type==FOLDER] -FOLDER=$(mcporter call --http-url "$MCP_URL" --tool unraid_docker \ - --args '{"action":"create_folder","folder_name":"mcp-test-delete-me"}' --output json) -FID=$(echo "$FOLDER" | python3 -c " -import json,sys -data=json.load(sys.stdin) -entries=(data.get('organizer',{}).get('views',{}).get('flatEntries') or []) -match=next((e['id'] for e in entries if e.get('type')=='FOLDER' and 'mcp-test' in e.get('name','')),'' ) -print(match)") +# Discover disk IDs +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"disk","subaction":"disks"}' --output json -# 2. 
Delete it -mcporter call --http-url "$MCP_URL" --tool unraid_docker \ - --args "{\"action\":\"delete_entries\",\"entry_ids\":[\"$FID\"],\"confirm\":true}" --output json - -# 3. Verify -mcporter call --http-url "$MCP_URL" --tool unraid_docker \ - --args '{"action":"list"}' --output json | python3 -c \ - "import json,sys; folders=[x for x in json.load(sys.stdin).get('folders',[]) if 'mcp-test' in x.get('name','')]; print('clean' if not folders else folders)" +# Clear stats for a specific disk +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"array","subaction":"clear_disk_stats","disk_id":"","confirm":true}' --output json ``` --- -### `reset_template_mappings` — Wipe all template-to-container associations - -**Strategy: mock/safety audit only.** -Global state — wipes all template mappings, requires full remapping afterward. No safe isolation. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. - ---- - -## `unraid_vm` +## `vm` ### `force_stop` — Hard power-off a VM (potential data corruption) @@ -80,16 +54,16 @@ Global state — wipes all template mappings, requires full remapping afterward. 
# Prerequisite: create a minimal Alpine test VM in Unraid VM manager # (Alpine ISO, 512MB RAM, no persistent disk, name contains "mcp-test") -VID=$(mcporter call --http-url "$MCP_URL" --tool unraid_vm \ - --args '{"action":"list"}' --output json \ +VID=$(mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"vm","subaction":"list"}' --output json \ | python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))") -mcporter call --http-url "$MCP_URL" --tool unraid_vm \ - --args "{\"action\":\"force_stop\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args "{\"action\":\"vm\",\"subaction\":\"force_stop\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json # Verify: VM state should return to stopped -mcporter call --http-url "$MCP_URL" --tool unraid_vm \ - --args "{\"action\":\"details\",\"vm_id\":\"$VID\"}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args "{\"action\":\"vm\",\"subaction\":\"details\",\"vm_id\":\"$VID\"}" --output json ``` --- @@ -98,27 +72,27 @@ mcporter call --http-url "$MCP_URL" --tool unraid_vm \ ```bash # Same minimal Alpine test VM as above -VID=$(mcporter call --http-url "$MCP_URL" --tool unraid_vm \ - --args '{"action":"list"}' --output json \ +VID=$(mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"vm","subaction":"list"}' --output json \ | python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))") -mcporter call --http-url "$MCP_URL" --tool unraid_vm \ - --args "{\"action\":\"reset\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args 
"{\"action\":\"vm\",\"subaction\":\"reset\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json ``` --- -## `unraid_notifications` +## `notification` ### `delete` — Permanently delete a notification ```bash # 1. Create a test notification, then list to get the real stored ID (create response # ID is ULID-based; stored filename uses a unix timestamp, so IDs differ) -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"create","title":"mcp-test-delete","subject":"safe to delete","description":"MCP destructive action test","importance":"INFO"}' --output json -NID=$(mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"list","notification_type":"UNREAD"}' --output json \ +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"create","title":"mcp-test-delete","subject":"safe to delete","description":"MCP destructive action test","importance":"INFO"}' --output json +NID=$(mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \ | python3 -c " import json,sys notifs=json.load(sys.stdin).get('notifications',[]) @@ -126,12 +100,12 @@ matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-delete'] print(matches[0] if matches else '')") # 2. Delete it (notification_type required) -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args "{\"action\":\"delete\",\"notification_id\":\"$NID\",\"notification_type\":\"UNREAD\",\"confirm\":true}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args "{\"action\":\"notification\",\"subaction\":\"delete\",\"notification_id\":\"$NID\",\"notification_type\":\"UNREAD\",\"confirm\":true}" --output json # 3. 
Verify -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"list"}' --output json | python3 -c \ +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"list"}' --output json | python3 -c \ "import json,sys; ns=[n for n in json.load(sys.stdin).get('notifications',[]) if 'mcp-test' in n.get('title','')]; print('clean' if not ns else ns)" ``` @@ -141,45 +115,45 @@ mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ ```bash # 1. Create and archive a test notification -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"create","title":"mcp-test-archive-wipe","subject":"archive me","description":"safe to delete","importance":"INFO"}' --output json -AID=$(mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"list","notification_type":"UNREAD"}' --output json \ +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"create","title":"mcp-test-archive-wipe","subject":"archive me","description":"safe to delete","importance":"INFO"}' --output json +AID=$(mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \ | python3 -c " import json,sys notifs=json.load(sys.stdin).get('notifications',[]) matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-archive-wipe'] print(matches[0] if matches else '')") -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args "{\"action\":\"archive\",\"notification_id\":\"$AID\"}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args "{\"action\":\"notification\",\"subaction\":\"archive\",\"notification_id\":\"$AID\"}" --output json # 2. 
Wipe all archived # NOTE: this deletes ALL archived notifications, not just the test one -mcporter call --http-url "$MCP_URL" --tool unraid_notifications \ - --args '{"action":"delete_archived","confirm":true}' --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"notification","subaction":"delete_archived","confirm":true}' --output json ``` > Run on `shart` if archival history on `tootie` matters. --- -## `unraid_rclone` +## `rclone` ### `delete_remote` — Remove an rclone remote configuration ```bash # 1. Create a throwaway local remote (points to /tmp — no real data) # Parameters: name (str), provider_type (str), config_data (dict) -mcporter call --http-url "$MCP_URL" --tool unraid_rclone \ - --args '{"action":"create_remote","name":"mcp-test-remote","provider_type":"local","config_data":{"root":"/tmp"}}' --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"rclone","subaction":"create_remote","name":"mcp-test-remote","provider_type":"local","config_data":{"root":"/tmp"}}' --output json # 2. Delete it -mcporter call --http-url "$MCP_URL" --tool unraid_rclone \ - --args '{"action":"delete_remote","name":"mcp-test-remote","confirm":true}' --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"rclone","subaction":"delete_remote","name":"mcp-test-remote","confirm":true}' --output json # 3. 
Verify -mcporter call --http-url "$MCP_URL" --tool unraid_rclone \ - --args '{"action":"list_remotes"}' --output json | python3 -c \ +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"rclone","subaction":"list_remotes"}' --output json | python3 -c \ "import json,sys; remotes=json.load(sys.stdin).get('remotes',[]); print('clean' if 'mcp-test-remote' not in remotes else 'FOUND — cleanup failed')" ``` @@ -187,29 +161,29 @@ mcporter call --http-url "$MCP_URL" --tool unraid_rclone \ --- -## `unraid_keys` +## `key` ### `delete` — Delete an API key (immediately revokes access) ```bash # 1. Create a test key (names cannot contain hyphens; ID is at key.id) -KID=$(mcporter call --http-url "$MCP_URL" --tool unraid_keys \ - --args '{"action":"create","name":"mcp test key","roles":["VIEWER"]}' --output json \ +KID=$(mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"key","subaction":"create","name":"mcp test key","roles":["VIEWER"]}' --output json \ | python3 -c "import json,sys; print(json.load(sys.stdin).get('key',{}).get('id',''))") # 2. Delete it -mcporter call --http-url "$MCP_URL" --tool unraid_keys \ - --args "{\"action\":\"delete\",\"key_id\":\"$KID\",\"confirm\":true}" --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args "{\"action\":\"key\",\"subaction\":\"delete\",\"key_id\":\"$KID\",\"confirm\":true}" --output json # 3. 
Verify -mcporter call --http-url "$MCP_URL" --tool unraid_keys \ - --args '{"action":"list"}' --output json | python3 -c \ - "import json,sys; ks=json.load(sys.stdin).get('keys',[]); print('clean' if not any('mcp-test-key' in k.get('name','') for k in ks) else 'FOUND — cleanup failed')" +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"key","subaction":"list"}' --output json | python3 -c \ + "import json,sys; ks=json.load(sys.stdin).get('keys',[]); print('clean' if not any('mcp test key' in k.get('name','') for k in ks) else 'FOUND — cleanup failed')" ``` --- -## `unraid_storage` +## `disk` ### `flash_backup` — Rclone backup of flash drive (overwrites destination) @@ -217,70 +191,34 @@ mcporter call --http-url "$MCP_URL" --tool unraid_keys \ # Prerequisite: create a dedicated test remote pointing away from real backup destination # (use rclone create_remote first, or configure mcp-test-remote manually) -mcporter call --http-url "$MCP_URL" --tool unraid_storage \ - --args '{"action":"flash_backup","remote_name":"mcp-test-remote","source_path":"/boot","destination_path":"/flash-backup-test","confirm":true}' --output json +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"disk","subaction":"flash_backup","remote_name":"mcp-test-remote","source_path":"/boot","destination_path":"/flash-backup-test","confirm":true}' --output json ``` > Never point at the same destination as your real flash backup. Create a dedicated `mcp-test-remote` (see `rclone: delete_remote` above for provisioning pattern). --- -## `unraid_settings` +## `setting` ### `configure_ups` — Overwrite UPS monitoring configuration **Strategy: mock/safety audit only.** -Wrong config can break UPS integration. If live testing is required: read current config via `unraid_info ups_config`, save values, re-apply identical values (no-op), verify response matches. Test via `tests/safety/` for guard behavior. 
+Wrong config can break UPS integration. If live testing is required: read current config via `unraid(action="system", subaction="ups_config")`, save values, re-apply identical values (no-op), verify response matches. Test via `tests/safety/` for guard behavior. --- -### `setup_remote_access` — Modify remote access configuration +## `plugin` + +### `remove` — Uninstall a plugin (irreversible without re-install) **Strategy: mock/safety audit only.** -Misconfiguration can break remote connectivity and lock you out. Do not run live. Test via `tests/safety/` confirming `confirm=False` raises `ToolError`. - ---- - -### `enable_dynamic_remote_access` — Toggle dynamic remote access +Removing a plugin cannot be undone without a full re-install. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless the plugin is intentionally being uninstalled. ```bash -# Strategy: toggle to false (disabling is reversible) on shart only, then restore -# Step 1: Read current state -CURRENT=$(mcporter call --http-url "$SHART_MCP_URL" --tool unraid_info \ - --args '{"action":"settings"}' --output json) - -# Step 2: Disable (safe — can be re-enabled) -mcporter call --http-url "$SHART_MCP_URL" --tool unraid_settings \ - --args '{"action":"enable_dynamic_remote_access","access_url_type":"SUBDOMAINS","dynamic_enabled":false,"confirm":true}' --output json - -# Step 3: Restore to previous state -mcporter call --http-url "$SHART_MCP_URL" --tool unraid_settings \ - --args '{"action":"enable_dynamic_remote_access","access_url_type":"SUBDOMAINS","dynamic_enabled":true,"confirm":true}' --output json -``` - -> Run on `shart` (10.1.0.3) only — never `tootie`. - ---- - -## `unraid_info` - -### `update_ssh` — Change SSH enabled state and port - -```bash -# Strategy: read current config, re-apply same values (no-op change) - -# 1. 
Read current SSH settings -CURRENT=$(mcporter call --http-url "$MCP_URL" --tool unraid_info \ - --args '{"action":"settings"}' --output json) -SSH_ENABLED=$(echo "$CURRENT" | python3 -c "import json,sys; print(json.load(sys.stdin).get('ssh',{}).get('enabled', True))") -SSH_PORT=$(echo "$CURRENT" | python3 -c "import json,sys; print(json.load(sys.stdin).get('ssh',{}).get('port', 22))") - -# 2. Re-apply same values (no-op) -mcporter call --http-url "$MCP_URL" --tool unraid_info \ - --args "{\"action\":\"update_ssh\",\"ssh_enabled\":$SSH_ENABLED,\"ssh_port\":$SSH_PORT,\"confirm\":true}" --output json - -# 3. Verify SSH connectivity still works -ssh root@"$UNRAID_HOST" -p "$SSH_PORT" exit +# If live testing is necessary (intentional removal only): +mcporter call --stdio-cmd "uv run unraid-mcp-server" --tool unraid \ + --args '{"action":"plugin","subaction":"remove","names":[""],"confirm":true}' --output json ``` --- @@ -290,7 +228,9 @@ ssh root@"$UNRAID_HOST" -p "$SSH_PORT" exit The `tests/safety/` directory contains pytest tests that verify: - Every destructive action raises `ToolError` when called with `confirm=False` - Every destructive action raises `ToolError` when called without the `confirm` parameter -- The `DESTRUCTIVE_ACTIONS` set in each tool file stays in sync with the actions listed above +- The `_*_DESTRUCTIVE` sets in `unraid_mcp/tools/unraid.py` stay in sync with the actions listed above +- No GraphQL request reaches the network layer when confirmation is missing (`TestNoGraphQLCallsWhenUnconfirmed`) +- Non-destructive actions never require `confirm` (`TestNonDestructiveActionsNeverRequireConfirm`) These run as part of the standard test suite: @@ -302,20 +242,17 @@ uv run pytest tests/safety/ -v ## Summary Table -| Tool | Action | Strategy | Target Server | -|------|--------|----------|---------------| -| `unraid_docker` | `remove` | Pre-existing stopped container on Unraid server (skipped in test-destructive.sh) | either | -| `unraid_docker` | 
`update_all` | Mock/safety audit only | — | -| `unraid_docker` | `delete_entries` | Create folder → destroy | either | -| `unraid_docker` | `reset_template_mappings` | Mock/safety audit only | — | -| `unraid_vm` | `force_stop` | Minimal Alpine test VM | either | -| `unraid_vm` | `reset` | Minimal Alpine test VM | either | -| `unraid_notifications` | `delete` | Create notification → destroy | either | -| `unraid_notifications` | `delete_archived` | Create → archive → wipe | shart preferred | -| `unraid_rclone` | `delete_remote` | Create local:/tmp remote → destroy | either | -| `unraid_keys` | `delete` | Create test key → destroy | either | -| `unraid_storage` | `flash_backup` | Dedicated test remote, isolated path | either | -| `unraid_settings` | `configure_ups` | Mock/safety audit only | — | -| `unraid_settings` | `setup_remote_access` | Mock/safety audit only | — | -| `unraid_settings` | `enable_dynamic_remote_access` | Toggle false → restore | shart only | -| `unraid_info` | `update_ssh` | Read → re-apply same values (no-op) | either | +| Domain (`action=`) | Subaction | Strategy | Target Server | +|--------------------|-----------|----------|---------------| +| `array` | `stop_array` | Mock/safety audit only | — | +| `array` | `remove_disk` | Array must be stopped; use intended disk | either | +| `array` | `clear_disk_stats` | Discover disk ID → clear | either | +| `vm` | `force_stop` | Minimal Alpine test VM | either | +| `vm` | `reset` | Minimal Alpine test VM | either | +| `notification` | `delete` | Create notification → destroy | either | +| `notification` | `delete_archived` | Create → archive → wipe | shart preferred | +| `rclone` | `delete_remote` | Create local:/tmp remote → destroy | either | +| `key` | `delete` | Create test key → destroy | either | +| `disk` | `flash_backup` | Dedicated test remote, isolated path | either | +| `setting` | `configure_ups` | Mock/safety audit only | — | +| `plugin` | `remove` | Mock/safety audit only | — | diff --git 
a/docs/MARKETPLACE.md b/docs/MARKETPLACE.md index 72f5415..e5f6679 100644 --- a/docs/MARKETPLACE.md +++ b/docs/MARKETPLACE.md @@ -11,37 +11,77 @@ The marketplace catalog that lists all available plugins in this repository. **Contents:** - Marketplace metadata (name, version, owner, repository) -- Plugin catalog with the "unraid" skill +- Plugin catalog with the "unraid" plugin - Categories and tags for discoverability -### 2. Plugin Manifest (`skills/unraid/.claude-plugin/plugin.json`) -The individual plugin configuration for the Unraid skill. +### 2. Plugin Manifest (`.claude-plugin/plugin.json`) +The individual plugin configuration for the Unraid MCP server. -**Location:** `skills/unraid/.claude-plugin/plugin.json` +**Location:** `.claude-plugin/plugin.json` **Contents:** -- Plugin name, version, author +- Plugin name (`unraid`), version (`1.1.3`), author - Repository and homepage links -- Plugin-specific metadata +- `mcpServers` block that configures the server to run via `uv run unraid-mcp-server` in stdio mode -### 3. Documentation -- `.claude-plugin/README.md` - Marketplace installation guide -- Updated root `README.md` with plugin installation section -### 4. 
Validation Script -- `scripts/validate-marketplace.sh` - Automated validation of marketplace structure + +## MCP Tools Exposed + +The plugin registers **3 MCP tools**: + +| Tool | Purpose | +|------|---------| +| `unraid` | Primary tool — `action` (domain) + `subaction` (operation) routing, ~108 subactions across 15 domains | +| `diagnose_subscriptions` | Inspect WebSocket subscription connection states and errors | +| `test_subscription_query` | Test a specific GraphQL subscription query (allowlisted fields only) | + +### Calling Convention + +All Unraid operations go through the single `unraid` tool: + +``` +unraid(action="docker", subaction="list") +unraid(action="system", subaction="overview") +unraid(action="array", subaction="parity_status") +unraid(action="vm", subaction="list") +unraid(action="live", subaction="cpu") +``` + +### Domains (action=) + +| action | example subactions | +|--------|--------------------| +| `system` | overview, array, network, metrics, services, ups_devices | +| `health` | check, test_connection, diagnose, setup | +| `array` | parity_status, parity_start, start_array, add_disk | +| `disk` | shares, disks, disk_details, logs | +| `docker` | list, details, start, stop, restart | +| `vm` | list, details, start, stop, pause, resume | +| `notification` | overview, list, create, archive, archive_all | +| `key` | list, get, create, update, delete | +| `plugin` | list, add, remove | +| `rclone` | list_remotes, config_form, create_remote | +| `setting` | update, configure_ups | +| `customization` | theme, set_theme, sso_enabled | +| `oidc` | providers, configuration, validate_session | +| `user` | me | +| `live` | cpu, memory, array_state, log_tail, notification_feed | + +Destructive subactions (e.g. `stop_array`, `force_stop`, `delete`) require `confirm=True`. 
## Installation Methods ### Method 1: GitHub Distribution (Recommended for Users) -Once you push this to GitHub, users can install via: +Once pushed to GitHub, users install via: ```bash -# Add your marketplace +# Add the marketplace /plugin marketplace add jmagar/unraid-mcp -# Install the Unraid skill +# Install the Unraid plugin /plugin install unraid @unraid-mcp ``` @@ -59,7 +99,7 @@ For testing locally before publishing: ### Method 3: Direct URL -Users can also install from a specific commit or branch: +Install from a specific branch or commit: ```bash # From specific branch @@ -73,17 +113,16 @@ Users can also install from a specific commit or branch: ```text unraid-mcp/ -├── .claude-plugin/ # Marketplace manifest -│ ├── marketplace.json -│ └── README.md -├── skills/unraid/ # Plugin directory -│ ├── .claude-plugin/ # Plugin manifest -│ │ └── plugin.json -│ ├── SKILL.md # Skill documentation -│ ├── README.md # Plugin documentation -│ ├── examples/ # Example scripts -│ ├── scripts/ # Helper scripts -│ └── references/ # API reference docs +├── .claude-plugin/ # Plugin manifest + marketplace manifest +│ ├── plugin.json # Plugin configuration (name, version, mcpServers) +│ └── marketplace.json # Marketplace catalog +├── unraid_mcp/ # Python package (the actual MCP server) +│ ├── main.py # Entry point +│ ├── server.py # FastMCP server registration +│ ├── tools/unraid.py # Consolidated tool (all 3 tools registered here) +│ ├── config/ # Settings management +│ ├── core/ # GraphQL client, exceptions, shared types +│ └── subscriptions/ # Real-time WebSocket subscription manager └── scripts/ └── validate-marketplace.sh # Validation tool ``` @@ -91,15 +130,15 @@ unraid-mcp/ ## Marketplace Metadata ### Categories -- `infrastructure` - Server management and monitoring tools +- `infrastructure` — Server management and monitoring tools ### Tags -- `unraid` - Unraid-specific functionality -- `monitoring` - System monitoring capabilities -- `homelab` - Homelab automation -- 
`graphql` - GraphQL API integration -- `docker` - Docker container management -- `virtualization` - VM management +- `unraid` — Unraid-specific functionality +- `monitoring` — System monitoring capabilities +- `homelab` — Homelab automation +- `graphql` — GraphQL API integration +- `docker` — Docker container management +- `virtualization` — VM management ## Publishing Checklist @@ -110,10 +149,10 @@ Before publishing to GitHub: ./scripts/validate-marketplace.sh ``` -2. **Update Version Numbers** - - Bump version in `.claude-plugin/marketplace.json` - - Bump version in `skills/unraid/.claude-plugin/plugin.json` - - Update version in `README.md` if needed +2. **Update Version Numbers** (must be in sync) + - `pyproject.toml` → `version = "X.Y.Z"` under `[project]` + - `.claude-plugin/plugin.json` → `"version": "X.Y.Z"` + - `.claude-plugin/marketplace.json` → `"version"` in both `metadata` and `plugins[]` 3. **Test Locally** ```bash @@ -123,34 +162,39 @@ Before publishing to GitHub: 4. **Commit and Push** ```bash - git add .claude-plugin/ skills/unraid/.claude-plugin/ - git commit -m "feat: add Claude Code marketplace configuration" + git add .claude-plugin/ + git commit -m "chore: bump marketplace to vX.Y.Z" git push origin main ``` -5. **Create Release Tag** (Optional) +5. **Create Release Tag** ```bash - git tag -a v0.2.0 -m "Release v0.2.0" - git push origin v0.2.0 + git tag -a vX.Y.Z -m "Release vX.Y.Z" + git push origin vX.Y.Z ``` ## User Experience -After installation, users will: +After installation, users can: -1. **See the skill in their skill list** - ```bash - /skill list +1. **Invoke Unraid operations directly in Claude Code** + ``` + unraid(action="system", subaction="overview") + unraid(action="docker", subaction="list") + unraid(action="health", subaction="check") ``` -2. **Access Unraid functionality directly** - - Claude Code will automatically detect when to invoke the skill - - Users can explicitly invoke with `/unraid` +2. 
**Use the credential setup tool on first run** + ``` + unraid(action="health", subaction="setup") + ``` + This triggers elicitation to collect and persist credentials to `~/.unraid-mcp/.env`. -3. **Have access to all helper scripts** - - Example scripts in `examples/` - - Utility scripts in `scripts/` - - API reference in `references/` +3. **Monitor live data via subscriptions** + ``` + unraid(action="live", subaction="cpu") + unraid(action="live", subaction="log_tail") + ``` ## Maintenance @@ -158,31 +202,21 @@ After installation, users will: To release a new version: -1. Make changes to the plugin -2. Update version in `skills/unraid/.claude-plugin/plugin.json` -3. Update marketplace catalog in `.claude-plugin/marketplace.json` -4. Run validation: `./scripts/validate-marketplace.sh` -5. Commit and push +1. Make changes to the plugin code +2. Update version in `pyproject.toml`, `.claude-plugin/plugin.json`, and `.claude-plugin/marketplace.json` +3. Run validation: `./scripts/validate-marketplace.sh` +4. Commit and push -Users with the plugin installed will see the update available and can upgrade with: +Users with the plugin installed will see the update available and can upgrade: ```bash /plugin update unraid ``` -### Adding More Plugins - -To add additional plugins to this marketplace: - -1. Create new plugin directory: `skills/new-plugin/` -2. Add plugin manifest: `skills/new-plugin/.claude-plugin/plugin.json` -3. Update marketplace catalog: add entry to `.plugins[]` array in `.claude-plugin/marketplace.json` -4. 
Validate: `./scripts/validate-marketplace.sh` - ## Support - **Repository:** https://github.com/jmagar/unraid-mcp - **Issues:** https://github.com/jmagar/unraid-mcp/issues -- **Documentation:** See `.claude-plugin/README.md` and `skills/unraid/README.md` +- **Destructive Actions:** `docs/DESTRUCTIVE_ACTIONS.md` ## Validation @@ -199,5 +233,3 @@ This checks: - Plugin structure - Source path accuracy - Documentation completeness - -All 17 checks must pass before publishing. diff --git a/docs/PUBLISHING.md b/docs/PUBLISHING.md index 95bf65f..b3f0a67 100644 --- a/docs/PUBLISHING.md +++ b/docs/PUBLISHING.md @@ -2,6 +2,26 @@ This guide covers how to publish `unraid-mcp` to PyPI so it can be installed via `uvx` or `pip` from anywhere. +## Package Overview + +**PyPI package name:** `unraid-mcp` +**Entry point binary:** `unraid-mcp-server` (also aliased as `unraid-mcp`) +**Current version:** `1.1.3` + +The package ships a FastMCP server exposing **3 MCP tools**: +- `unraid` — primary tool with `action` + `subaction` routing (~107 subactions, 15 domains) +- `diagnose_subscriptions` — WebSocket subscription diagnostics +- `test_subscription_query` — test individual GraphQL subscription queries + +Tool call convention: `unraid(action="docker", subaction="list")` + +### Version Sync Requirement + +When bumping the version, **all three files must be updated together**: +- `pyproject.toml` → `version = "X.Y.Z"` under `[project]` +- `.claude-plugin/plugin.json` → `"version": "X.Y.Z"` +- `.claude-plugin/marketplace.json` → `"version"` in both `metadata` and `plugins[]` + ## Prerequisites 1. 
**PyPI Account**: Create accounts on both: @@ -40,7 +60,7 @@ Before publishing, update the version in `pyproject.toml`: ```toml [project] -version = "0.2.1" # Follow semantic versioning: MAJOR.MINOR.PATCH +version = "1.1.3" # Follow semantic versioning: MAJOR.MINOR.PATCH ``` **Semantic Versioning Guide:** @@ -82,8 +102,8 @@ uv run python -m build ``` This creates: -- `dist/unraid_mcp-VERSION-py3-none-any.whl` (wheel) -- `dist/unraid_mcp-VERSION.tar.gz` (source distribution) +- `dist/unraid_mcp-1.1.3-py3-none-any.whl` (wheel) +- `dist/unraid_mcp-1.1.3.tar.gz` (source distribution) ### 4. Validate the Package @@ -156,7 +176,7 @@ UNRAID_API_URL=https://your-server uvx unraid-mcp-server **Benefits of uvx:** - No installation required - Automatic virtual environment management -- Always uses the latest version (or specify version: `uvx unraid-mcp-server@0.2.0`) +- Always uses the latest version (or specify version: `uvx unraid-mcp-server@1.1.3`) - Clean execution environment ## Automation with GitHub Actions (Future) diff --git a/pyproject.toml b/pyproject.toml index 69f42f9..ab40855 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ build-backend = "hatchling.build" # ============================================================================ [project] name = "unraid-mcp" -version = "0.6.0" +version = "1.1.3" description = "MCP Server for Unraid API - provides tools to interact with an Unraid server's GraphQL API" readme = "README.md" license = {file = "LICENSE"} @@ -71,7 +71,7 @@ classifiers = [ # ============================================================================ dependencies = [ "python-dotenv>=1.1.1", - "fastmcp>=2.14.5", + "fastmcp>=3.0.0", "httpx>=0.28.1", "fastapi>=0.115.0", "uvicorn[standard]>=0.35.0", @@ -258,6 +258,7 @@ omit = [ ] [tool.coverage.report] +fail_under = 80 precision = 2 show_missing = true skip_covered = false diff --git a/scripts/validate-marketplace.sh b/scripts/validate-marketplace.sh index e668c3c..fcb31a9 100755 
--- a/scripts/validate-marketplace.sh +++ b/scripts/validate-marketplace.sh @@ -70,6 +70,20 @@ else echo -e "Checking: Plugin source path is valid... ${RED}✗${NC} (plugin not found in marketplace)" fi +# Check version sync between pyproject.toml and plugin.json +echo "Checking version sync..." +TOML_VER=$(grep -m1 '^version = ' pyproject.toml | sed 's/version = "//;s/"//') +PLUGIN_VER=$(python3 -c "import json; print(json.load(open('.claude-plugin/plugin.json'))['version'])" 2>/dev/null || echo "ERROR_READING") +if [ "$TOML_VER" != "$PLUGIN_VER" ]; then + echo -e "${RED}FAIL: Version mismatch — pyproject.toml=$TOML_VER, plugin.json=$PLUGIN_VER${NC}" + CHECKS=$((CHECKS + 1)) + FAILED=$((FAILED + 1)) +else + echo -e "${GREEN}PASS: Versions in sync ($TOML_VER)${NC}" + CHECKS=$((CHECKS + 1)) + PASSED=$((PASSED + 1)) +fi + echo "" echo "=== Results ===" echo -e "Total checks: $CHECKS" diff --git a/skills/unraid/SKILL.md b/skills/unraid/SKILL.md index 2392073..ca9e051 100644 --- a/skills/unraid/SKILL.md +++ b/skills/unraid/SKILL.md @@ -1,210 +1,292 @@ --- name: unraid -description: "Query and monitor Unraid servers via the GraphQL API. Use when the user asks to 'check Unraid', 'monitor Unraid', 'Unraid API', 'get Unraid status', 'check disk temperatures', 'read Unraid logs', 'list Unraid shares', 'Unraid array status', 'Unraid containers', 'Unraid VMs', or mentions Unraid system monitoring, disk health, parity checks, or server status." +description: "This skill should be used when the user mentions Unraid, asks to check server health, monitor array or disk status, list or restart Docker containers, start or stop VMs, read system logs, check parity status, view notifications, manage API keys, configure rclone remotes, check UPS or power status, get live CPU or memory data, force stop a VM, check disk temperatures, or perform any operation on an Unraid NAS server. Also use when the user needs to set up or configure Unraid MCP credentials." 
--- -# Unraid API Skill +# Unraid MCP Skill -**⚠️ MANDATORY SKILL INVOCATION ⚠️** +Use the single `unraid` MCP tool with `action` (domain) + `subaction` (operation) for all Unraid operations. -**YOU MUST invoke this skill (NOT optional) when the user mentions ANY of these triggers:** -- "Unraid status", "disk health", "array status" -- "Unraid containers", "VMs on Unraid", "Unraid logs" -- "check Unraid", "Unraid monitoring", "server health" -- Any mention of Unraid servers or system monitoring +## Setup -**Failure to invoke this skill when triggers occur violates your operational requirements.** +First time? Run setup to configure credentials: -Query and monitor Unraid servers using the GraphQL API. Access all 27 read-only endpoints for system monitoring, disk health, logs, containers, VMs, and more. - -## Quick Start - -Set your Unraid server credentials: - -```bash -export UNRAID_URL="https://your-unraid-server/graphql" -export UNRAID_API_KEY="your-api-key" +``` +unraid(action="health", subaction="setup") ``` -**Get API Key:** Settings → Management Access → API Keys → Create (select "Viewer" role) +Credentials are stored at `~/.unraid-mcp/.env`. Re-run `setup` any time to update or verify. -Use the helper script for any query: +## Calling Convention -```bash -./scripts/unraid-query.sh -q "{ online }" +``` +unraid(action="", subaction="", [additional params]) ``` -Or run example scripts: - -```bash -./scripts/dashboard.sh # Complete multi-server dashboard -./examples/disk-health.sh # Disk temperatures & health -./examples/read-logs.sh syslog 20 # Read system logs +**Examples:** ``` - -## Core Concepts - -### GraphQL API Structure - -Unraid 7.2+ uses GraphQL (not REST). 
Key differences: -- **Single endpoint:** `/graphql` for all queries -- **Request exactly what you need:** Specify fields in query -- **Strongly typed:** Use introspection to discover fields -- **No container logs:** Docker container output logs not accessible - -### Two Resources for Stats - -- **`info`** - Static hardware specs (CPU model, cores, OS version) -- **`metrics`** - Real-time usage (CPU %, memory %, current load) - -Always use `metrics` for monitoring, `info` for specifications. - -## Common Tasks - -### System Monitoring - -**Check if server is online:** -```bash -./scripts/unraid-query.sh -q "{ online }" -``` - -**Get CPU and memory usage:** -```bash -./scripts/unraid-query.sh -q "{ metrics { cpu { percentTotal } memory { used total percentTotal } } }" -``` - -**Complete dashboard:** -```bash -./scripts/dashboard.sh -``` - -### Disk Management - -**Check disk health and temperatures:** -```bash -./examples/disk-health.sh -``` - -**Get array status:** -```bash -./scripts/unraid-query.sh -q "{ array { state parityCheckStatus { status progress errors } } }" -``` - -**List all physical disks (including cache/USB):** -```bash -./scripts/unraid-query.sh -q "{ disks { name } }" -``` - -### Storage Shares - -**List network shares:** -```bash -./scripts/unraid-query.sh -q "{ shares { name comment } }" -``` - -### Logs - -**List available logs:** -```bash -./scripts/unraid-query.sh -q "{ logFiles { name size modifiedAt } }" -``` - -**Read log content:** -```bash -./examples/read-logs.sh syslog 20 -``` - -### Containers & VMs - -**List Docker containers:** -```bash -./scripts/unraid-query.sh -q "{ docker { containers { names image state status } } }" -``` - -**List VMs:** -```bash -./scripts/unraid-query.sh -q "{ vms { domain { name state } } }" -``` - -**Note:** Container output logs are NOT accessible via API. Use `docker logs` via SSH. 
- -### Notifications - -**Get notification counts:** -```bash -./scripts/unraid-query.sh -q "{ notifications { overview { unread { info warning alert total } } } }" -``` - -## Helper Script Usage - -The `scripts/unraid-query.sh` helper supports: - -```bash -# Basic usage -./scripts/unraid-query.sh -u URL -k API_KEY -q "QUERY" - -# Use environment variables -export UNRAID_URL="https://unraid.local/graphql" -export UNRAID_API_KEY="your-key" -./scripts/unraid-query.sh -q "{ online }" - -# Format options --f json # Raw JSON (default) --f pretty # Pretty-printed JSON --f raw # Just the data (no wrapper) -``` - -## Additional Resources - -### Reference Files - -For detailed documentation, consult: -- **`references/endpoints.md`** - Complete list of all 27 API endpoints -- **`references/troubleshooting.md`** - Common errors and solutions -- **`references/api-reference.md`** - Detailed field documentation - -### Helper Scripts - -- **`scripts/unraid-query.sh`** - Main GraphQL query tool -- **`scripts/dashboard.sh`** - Automated multi-server inventory reporter - -## Quick Command Reference - -```bash -# System status -./scripts/unraid-query.sh -q "{ online metrics { cpu { percentTotal } } }" - -# Disk health -./examples/disk-health.sh - -# Array status -./scripts/unraid-query.sh -q "{ array { state } }" - -# Read logs -./examples/read-logs.sh syslog 20 - -# Complete dashboard -./scripts/dashboard.sh - -# List shares -./scripts/unraid-query.sh -q "{ shares { name } }" - -# List containers -./scripts/unraid-query.sh -q "{ docker { containers { names state } } }" +unraid(action="system", subaction="overview") +unraid(action="docker", subaction="list") +unraid(action="health", subaction="check") +unraid(action="array", subaction="parity_status") +unraid(action="disk", subaction="disks") +unraid(action="vm", subaction="list") +unraid(action="notification", subaction="overview") +unraid(action="live", subaction="cpu") ``` --- -## 🔧 Agent Tool Usage Requirements +## All Domains 
and Subactions -**CRITICAL:** When invoking scripts from this skill via the zsh-tool, **ALWAYS use `pty: true`**. +### `system` — Server Information +| Subaction | Description | +|-----------|-------------| +| `overview` | Complete system summary (recommended starting point) | +| `server` | Hostname, version, uptime | +| `servers` | All known Unraid servers | +| `array` | Array status and disk list | +| `network` | Network interfaces and config | +| `registration` | License and registration status | +| `variables` | Environment variables | +| `metrics` | Real-time CPU, memory, I/O usage | +| `services` | Running services status | +| `display` | Display settings | +| `config` | System configuration | +| `online` | Quick online status check | +| `owner` | Server owner information | +| `settings` | User settings and preferences | +| `flash` | USB flash drive details | +| `ups_devices` | List all UPS devices | +| `ups_device` | Single UPS device (requires `device_id`) | +| `ups_config` | UPS configuration | -Without PTY mode, command output will not be visible even though commands execute successfully. 
+### `health` — Diagnostics +| Subaction | Description | +|-----------|-------------| +| `check` | Comprehensive health check — connectivity, array, disks, containers, VMs, resources | +| `test_connection` | Test API connectivity and authentication | +| `diagnose` | Detailed diagnostic report with troubleshooting recommendations | +| `setup` | Configure credentials interactively (stores to `~/.unraid-mcp/.env`) | -**Correct invocation pattern:** -```typescript - -./skills/SKILL_NAME/scripts/SCRIPT.sh [args] -true - +### `array` — Array & Parity +| Subaction | Description | +|-----------|-------------| +| `parity_status` | Current parity check progress and status | +| `parity_history` | Historical parity check results | +| `parity_start` | Start a parity check | +| `parity_pause` | Pause a running parity check | +| `parity_resume` | Resume a paused parity check | +| `parity_cancel` | Cancel a running parity check | +| `start_array` | Start the array | +| `stop_array` | ⚠️ Stop the array (requires `confirm=True`) | +| `add_disk` | Add a disk to the array (requires `slot`, `id`) | +| `remove_disk` | ⚠️ Remove a disk (requires `slot`, `confirm=True`) | +| `mount_disk` | Mount a disk | +| `unmount_disk` | Unmount a disk | +| `clear_disk_stats` | ⚠️ Clear disk statistics (requires `confirm=True`) | + +### `disk` — Storage & Logs +| Subaction | Description | +|-----------|-------------| +| `shares` | List network shares | +| `disks` | All physical disks with health and temperatures | +| `disk_details` | Detailed info for a specific disk (requires `disk_id`) | +| `log_files` | List available log files | +| `logs` | Read log content (requires `log_path`; optional `tail_lines`) | +| `flash_backup` | ⚠️ Trigger a flash backup (requires `confirm=True`) | + +### `docker` — Containers +| Subaction | Description | +|-----------|-------------| +| `list` | All containers with status, image, state | +| `details` | Single container details (requires container identifier) | +| `start` 
| Start a container (requires container identifier) | +| `stop` | Stop a container (requires container identifier) | +| `restart` | Restart a container (requires container identifier) | +| `networks` | List Docker networks | +| `network_details` | Details for a specific network (requires `network_id`) | + +**Container Identification:** Name, ID, or partial name (fuzzy match supported). + +### `vm` — Virtual Machines +| Subaction | Description | +|-----------|-------------| +| `list` | All VMs with state | +| `details` | Single VM details (requires `vm_id`) | +| `start` | Start a VM (requires `vm_id`) | +| `stop` | Gracefully stop a VM (requires `vm_id`) | +| `pause` | Pause a VM (requires `vm_id`) | +| `resume` | Resume a paused VM (requires `vm_id`) | +| `reboot` | Reboot a VM (requires `vm_id`) | +| `force_stop` | ⚠️ Force stop a VM (requires `vm_id`, `confirm=True`) | +| `reset` | ⚠️ Hard reset a VM (requires `vm_id`, `confirm=True`) | + +### `notification` — Notifications +| Subaction | Description | +|-----------|-------------| +| `overview` | Notification counts (unread, archived by type) | +| `list` | List notifications (optional `filter`, `limit`, `offset`) | +| `mark_unread` | Mark a notification as unread (requires `notification_id`) | +| `create` | Create a notification (requires `title`, `subject`, `description`, `importance`) | +| `archive` | Archive a notification (requires `notification_id`) | +| `delete` | ⚠️ Delete a notification (requires `notification_id`, `notification_type`, `confirm=True`) | +| `delete_archived` | ⚠️ Delete all archived (requires `confirm=True`) | +| `archive_all` | Archive all unread notifications | +| `archive_many` | Archive multiple (requires `ids` list) | +| `unarchive_many` | Unarchive multiple (requires `ids` list) | +| `unarchive_all` | Unarchive all archived notifications | +| `recalculate` | Recalculate notification counts | + +### `key` — API Keys +| Subaction | Description | +|-----------|-------------| +| `list` | 
All API keys | +| `get` | Single key details (requires `key_id`) | +| `create` | Create a new key (requires `name`; optional `roles`, `permissions`) | +| `update` | Update a key (requires `key_id`) | +| `delete` | ⚠️ Delete a key (requires `key_id`, `confirm=True`) | +| `add_role` | Add a role to a key (requires `key_id`, `roles`) | +| `remove_role` | Remove a role from a key (requires `key_id`, `roles`) | + +### `plugin` — Plugins +| Subaction | Description | +|-----------|-------------| +| `list` | All installed plugins | +| `add` | Install plugins (requires `names` — list of plugin names) | +| `remove` | ⚠️ Uninstall plugins (requires `names` — list of plugin names, `confirm=True`) | + +### `rclone` — Cloud Storage +| Subaction | Description | +|-----------|-------------| +| `list_remotes` | List configured rclone remotes | +| `config_form` | Get configuration form for a remote type | +| `create_remote` | Create a new remote (requires `name`, `provider_type`, `config_data`) | +| `delete_remote` | ⚠️ Delete a remote (requires `name`, `confirm=True`) | + +### `setting` — System Settings +| Subaction | Description | +|-----------|-------------| +| `update` | Update system settings (requires `settings_input` object) | +| `configure_ups` | ⚠️ Configure UPS settings (requires `confirm=True`) | + +### `customization` — Theme & Appearance +| Subaction | Description | +|-----------|-------------| +| `theme` | Current theme settings | +| `public_theme` | Public-facing theme | +| `is_initial_setup` | Check if initial setup is complete | +| `sso_enabled` | Check SSO status | +| `set_theme` | Update theme (requires theme parameters) | + +### `oidc` — SSO / OpenID Connect +| Subaction | Description | +|-----------|-------------| +| `providers` | List configured OIDC providers | +| `provider` | Single provider details (requires `provider_id`) | +| `configuration` | OIDC configuration | +| `public_providers` | Public-facing provider list | +| `validate_session` | Validate 
current SSO session (requires `token`) | + +### `user` — Current User +| Subaction | Description | +|-----------|-------------| +| `me` | Current authenticated user info | + +### `live` — Real-Time Subscriptions +These use persistent WebSocket connections. Returns a "connecting" placeholder on the first call — retry momentarily for live data. + +| Subaction | Description | +|-----------|-------------| +| `cpu` | Live CPU utilization | +| `memory` | Live memory usage | +| `cpu_telemetry` | Detailed CPU telemetry | +| `array_state` | Live array state changes | +| `parity_progress` | Live parity check progress | +| `ups_status` | Live UPS status | +| `notifications_overview` | Live notification counts | +| `owner` | Live owner info | +| `server_status` | Live server status | +| `log_tail` | Live log tail stream | +| `notification_feed` | Live notification feed | + +--- + +## Destructive Actions + +All require `confirm=True` as an explicit parameter. Without it, the action is blocked and elicitation is triggered. 
+ +| Domain | Subaction | Risk | +|--------|-----------|------| +| `array` | `stop_array` | Stops array while containers/VMs may use shares | +| `array` | `remove_disk` | Removes disk from array | +| `array` | `clear_disk_stats` | Clears disk statistics permanently | +| `vm` | `force_stop` | Hard kills VM without graceful shutdown | +| `vm` | `reset` | Hard resets VM | +| `notification` | `delete` | Permanently deletes a notification | +| `notification` | `delete_archived` | Permanently deletes all archived notifications | +| `rclone` | `delete_remote` | Removes a cloud storage remote | +| `key` | `delete` | Permanently deletes an API key | +| `disk` | `flash_backup` | Triggers flash backup operation | +| `setting` | `configure_ups` | Modifies UPS configuration | +| `plugin` | `remove` | Uninstalls a plugin | + +--- + +## Common Workflows + +### First-time setup ``` +unraid(action="health", subaction="setup") +unraid(action="health", subaction="check") +``` + +### System health overview +``` +unraid(action="system", subaction="overview") +unraid(action="health", subaction="check") +``` + +### Container management +``` +unraid(action="docker", subaction="list") +unraid(action="docker", subaction="details", container_id="plex") +unraid(action="docker", subaction="restart", container_id="sonarr") +``` + +### Array and disk status +``` +unraid(action="array", subaction="parity_status") +unraid(action="disk", subaction="disks") +unraid(action="system", subaction="array") +``` + +### Read logs +``` +unraid(action="disk", subaction="log_files") +unraid(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=50) +``` + +### Live monitoring +``` +unraid(action="live", subaction="cpu") +unraid(action="live", subaction="memory") +unraid(action="live", subaction="array_state") +``` + +### VM operations +``` +unraid(action="vm", subaction="list") +unraid(action="vm", subaction="start", vm_id="") +unraid(action="vm", subaction="force_stop", vm_id="", 
confirm=True) +``` + +--- + +## Notes + +- **Rate limit:** 100 requests / 10 seconds +- **Log path validation:** Only `/var/log/`, `/boot/logs/`, `/mnt/` prefixes accepted +- **Container logs:** Docker container stdout/stderr are NOT accessible via API — use SSH + `docker logs` +- **`arraySubscription`:** Known Unraid API bug — `live/array_state` may show "connecting" indefinitely +- **Event-driven subs** (`notifications_overview`, `owner`, `server_status`, `ups_status`): Only populate cache on first real server event diff --git a/skills/unraid/references/api-reference.md b/skills/unraid/references/api-reference.md index dd452d0..2e44f20 100644 --- a/skills/unraid/references/api-reference.md +++ b/skills/unraid/references/api-reference.md @@ -1,5 +1,7 @@ # Unraid API - Complete Reference Guide +> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents the raw GraphQL API schema for development and maintenance purposes (adding new queries/mutations). Do NOT use these curl/GraphQL examples for MCP tool usage. Use `unraid(action=..., subaction=...)` calls instead. See [`SKILL.md`](../SKILL.md) for the correct calling convention. + **Tested on:** Unraid 7.2 x86_64 **Date:** 2026-01-21 **API Type:** GraphQL diff --git a/skills/unraid/references/endpoints.md b/skills/unraid/references/endpoints.md index c528846..f4fe1c0 100644 --- a/skills/unraid/references/endpoints.md +++ b/skills/unraid/references/endpoints.md @@ -1,5 +1,7 @@ # Unraid API Endpoints Reference +> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents raw GraphQL endpoints for development purposes. For MCP tool usage, use `unraid(action=..., subaction=...)` calls as documented in `SKILL.md`. + Complete list of available GraphQL read-only endpoints in Unraid 7.2+. 
## System & Metrics (8) diff --git a/skills/unraid/references/introspection-schema.md b/skills/unraid/references/introspection-schema.md index 62676a1..75fe057 100644 --- a/skills/unraid/references/introspection-schema.md +++ b/skills/unraid/references/introspection-schema.md @@ -1,3 +1,5 @@ +> **⚠️ DEVELOPER REFERENCE ONLY** — Full GraphQL SDL from live API introspection. Use this to verify field names and types when adding new queries/mutations to the MCP server. Not for runtime agent usage. + """ Indicates exactly one field must be supplied and this field must not be `null`. """ diff --git a/skills/unraid/references/quick-reference.md b/skills/unraid/references/quick-reference.md index 4760bb8..64bdbe8 100644 --- a/skills/unraid/references/quick-reference.md +++ b/skills/unraid/references/quick-reference.md @@ -1,219 +1,125 @@ -# Unraid API Quick Reference +# Unraid MCP — Quick Reference -Quick reference for the most common Unraid GraphQL API queries. +All operations use: `unraid(action="", subaction="", [params])` -## Setup +## Most Common Operations -```bash -# Set environment variables -export UNRAID_URL="https://your-unraid-server/graphql" -export UNRAID_API_KEY="your-api-key-here" +### Health & Status -# Or use the helper script directly -./scripts/unraid-query.sh -u "$UNRAID_URL" -k "$UNRAID_API_KEY" -q "{ online }" +```python +unraid(action="health", subaction="setup") # First-time credential setup +unraid(action="health", subaction="check") # Full health check +unraid(action="health", subaction="test_connection") # Quick connectivity test +unraid(action="system", subaction="overview") # Complete server summary +unraid(action="system", subaction="metrics") # CPU / RAM / I/O usage +unraid(action="system", subaction="online") # Online status ``` -## Common Queries +### Array & Disks -### System Status -```graphql -{ - online - metrics { - cpu { percentTotal } - memory { total used free percentTotal } - } -} +```python +unraid(action="system", 
subaction="array") # Array status overview +unraid(action="disk", subaction="disks") # All disks with temps & health +unraid(action="array", subaction="parity_status") # Current parity check +unraid(action="array", subaction="parity_history") # Past parity results +unraid(action="array", subaction="parity_start", correct=False) # Start parity check +unraid(action="array", subaction="stop_array", confirm=True) # ⚠️ Stop array ``` -### Array Status -```graphql -{ - array { - state - parityCheckStatus { status progress errors } - } -} -``` +### Logs -### Disk List with Temperatures -```graphql -{ - array { - disks { - name - device - temp - status - fsSize - fsFree - isSpinning - } - } -} -``` - -### All Physical Disks (including USB/SSDs) -```graphql -{ - disks { - id - name - } -} -``` - -### Network Shares -```graphql -{ - shares { - name - comment - } -} +```python +unraid(action="disk", subaction="log_files") # List available logs +unraid(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=50) # Read syslog +unraid(action="live", subaction="log_tail", path="/var/log/syslog") # Live tail ``` ### Docker Containers -```graphql -{ - docker { - containers { - id - names - image - state - status - } - } -} + +```python +unraid(action="docker", subaction="list") +unraid(action="docker", subaction="details", container_id="plex") +unraid(action="docker", subaction="start", container_id="nginx") +unraid(action="docker", subaction="stop", container_id="nginx") +unraid(action="docker", subaction="restart", container_id="sonarr") +unraid(action="docker", subaction="networks") ``` ### Virtual Machines -```graphql -{ - vms { - id - name - state - cpus - memory - } -} -``` -### List Log Files -```graphql -{ - logFiles { - name - size - modifiedAt - } -} -``` - -### Read Log Content -```graphql -{ - logFile(path: "syslog", lines: 20) { - content - totalLines - } -} -``` - -### System Info -```graphql -{ - info { - time - cpu { model cores threads } - os { 
distro release } - system { manufacturer model } - } -} -``` - -### UPS Devices -```graphql -{ - upsDevices { - id - name - status - charge - load - } -} +```python +unraid(action="vm", subaction="list") +unraid(action="vm", subaction="details", vm_id="") +unraid(action="vm", subaction="start", vm_id="") +unraid(action="vm", subaction="stop", vm_id="") +unraid(action="vm", subaction="reboot", vm_id="") +unraid(action="vm", subaction="force_stop", vm_id="", confirm=True) # ⚠️ ``` ### Notifications -**Counts:** -```graphql -{ - notifications { - overview { - unread { info warning alert total } - archive { info warning alert total } - } - } -} +```python +unraid(action="notification", subaction="overview") +unraid(action="notification", subaction="list", list_type="UNREAD", limit=10) +unraid(action="notification", subaction="archive", notification_id="") +unraid(action="notification", subaction="create", title="Test", subject="Subject", + description="Body", importance="INFO") ``` -**List Unread:** -```graphql -{ - notifications { - list(filter: { type: UNREAD, offset: 0, limit: 10 }) { - id - subject - description - timestamp - } - } -} +### API Keys + +```python +unraid(action="key", subaction="list") +unraid(action="key", subaction="create", name="my-key", roles=["viewer"]) +unraid(action="key", subaction="delete", key_id="", confirm=True) # ⚠️ ``` -**List Archived:** -```graphql -{ - notifications { - list(filter: { type: ARCHIVE, offset: 0, limit: 10 }) { - id - subject - description - timestamp - } - } -} +### Plugins + +```python +unraid(action="plugin", subaction="list") +unraid(action="plugin", subaction="add", names=["community.applications"]) +unraid(action="plugin", subaction="remove", names=["old.plugin"], confirm=True) # ⚠️ ``` -## Field Name Notes +### rclone -- Use `metrics` for real-time usage (CPU/memory percentages) -- Use `info` for hardware specs (cores, model, etc.) 
-- Temperature field is `temp` (not `temperature`) -- Status field is `state` for array (not `status`) -- Sizes are in kilobytes -- Temperatures are in Celsius - -## Response Structure - -All responses follow this pattern: -```json -{ - "data": { - "queryName": { ... } - } -} +```python +unraid(action="rclone", subaction="list_remotes") +unraid(action="rclone", subaction="delete_remote", name="", confirm=True) # ⚠️ ``` -Errors appear in: -```json -{ - "errors": [ - { "message": "..." } - ] -} +### Live Subscriptions (real-time) + +```python +unraid(action="live", subaction="cpu") +unraid(action="live", subaction="memory") +unraid(action="live", subaction="parity_progress") +unraid(action="live", subaction="log_tail") +unraid(action="live", subaction="notification_feed") +unraid(action="live", subaction="ups_status") ``` + +> Returns `{"status": "connecting"}` on first call — retry momentarily. + +--- + +## Domain → action= Mapping + +| Old tool name (pre-v1.0) | New `action=` | +|--------------------------|---------------| +| `unraid_info` | `system` | +| `unraid_health` | `health` | +| `unraid_array` | `array` | +| `unraid_storage` | `disk` | +| `unraid_docker` | `docker` | +| `unraid_vm` | `vm` | +| `unraid_notifications` | `notification` | +| `unraid_keys` | `key` | +| `unraid_plugins` | `plugin` | +| `unraid_rclone` | `rclone` | +| `unraid_settings` | `setting` | +| `unraid_customization` | `customization` | +| `unraid_oidc` | `oidc` | +| `unraid_users` | `user` | +| `unraid_live` | `live` | diff --git a/skills/unraid/references/troubleshooting.md b/skills/unraid/references/troubleshooting.md index 7b2df9c..74e6fe2 100644 --- a/skills/unraid/references/troubleshooting.md +++ b/skills/unraid/references/troubleshooting.md @@ -1,36 +1,109 @@ -# Unraid API Troubleshooting Guide +# Unraid MCP — Troubleshooting Guide -Common issues and solutions when working with the Unraid GraphQL API. 
+## Credentials Not Configured -## "Cannot query field" error +**Error:** `CredentialsNotConfiguredError` or message containing `~/.unraid-mcp/.env` -Field name doesn't exist in your Unraid version. Use introspection to find valid fields: +**Fix:** Run setup to configure credentials interactively: -```bash -./scripts/unraid-query.sh -q "{ __type(name: \"TypeName\") { fields { name } } }" +```python +unraid(action="health", subaction="setup") ``` -## "API key validation failed" -- Check API key is correct and not truncated -- Verify key has appropriate permissions (use "Viewer" role) -- Ensure URL includes `/graphql` endpoint (e.g. `http://host/graphql`) +This writes `UNRAID_API_URL` and `UNRAID_API_KEY` to `~/.unraid-mcp/.env`. Re-run at any time to update or rotate credentials. -## Empty results -Many queries return empty arrays when no data exists: -- `docker.containers` - No containers running -- `vms` - No VMs configured (or VM service disabled) -- `notifications` - No active alerts -- `plugins` - No plugins installed +--- -This is normal behavior, not an error. Ensure your scripts handle empty arrays gracefully. +## Connection Failed / API Unreachable -## "VMs are not available" (GraphQL Error) -If the VM manager is disabled in Unraid settings, querying `{ vms { ... } }` will return a GraphQL error. -**Solution:** Check if VM service is enabled before querying, or use error handling (like `IGNORE_ERRORS=true` in dashboard scripts) to process partial data. +**Symptoms:** Timeout, connection refused, network error -## URL connection issues -- Use HTTPS (not HTTP) for remote access if configured -- For local access: `http://unraid-server-ip/graphql` -- For Unraid Connect: Use provided URL with token in hostname -- Use `-k` (insecure) with curl if using self-signed certs on local HTTPS -- Use `-L` (follow redirects) if Unraid redirects HTTP to HTTPS +**Diagnostic steps:** + +1. 
Test basic connectivity: + +```python +unraid(action="health", subaction="test_connection") +``` + +1. Full diagnostic report: + +```python +unraid(action="health", subaction="diagnose") +``` + +1. Check that `UNRAID_API_URL` in `~/.unraid-mcp/.env` points to the correct Unraid GraphQL endpoint. + +1. Verify the API key has the required roles. Get a new key: **Unraid UI → Settings → Management Access → API Keys → Create** (select "Viewer" role for read-only, or appropriate roles for mutations). + +--- + +## Invalid Action / Subaction + +**Error:** `Invalid action 'X'` or `Invalid subaction 'X' for action 'Y'` + +**Fix:** Check the domain table in `SKILL.md` for the exact `action=` and `subaction=` strings. Common mistakes: + +| Wrong | Correct | +|-------|---------| +| `action="info"` | `action="system"` | +| `action="notifications"` | `action="notification"` | +| `action="keys"` | `action="key"` | +| `action="plugins"` | `action="plugin"` | +| `action="settings"` | `action="setting"` | +| `subaction="unread"` | `subaction="mark_unread"` | + +--- + +## Destructive Action Blocked + +**Error:** `Action 'X' was not confirmed. Re-run with confirm=True to bypass elicitation.` + +**Fix:** Add `confirm=True` to the call: + +```python +unraid(action="array", subaction="stop_array", confirm=True) +unraid(action="vm", subaction="force_stop", vm_id="", confirm=True) +``` + +See the Destructive Actions table in `SKILL.md` for the full list. + +--- + +## Live Subscription Returns "Connecting" + +**Symptoms:** `unraid(action="live", ...)` returns `{"status": "connecting"}` + +**Explanation:** The persistent WebSocket subscription has not yet received its first event. Retry in a moment. + +**Known issue:** `live/array_state` uses `arraySubscription` which has a known Unraid API bug (returns null for a non-nullable field). This subscription may show "connecting" indefinitely. 
+ +**Event-driven subscriptions** (`live/parity_progress`, `live/notifications_overview`, `live/owner`, `live/server_status`, `live/ups_status`) only populate when the server emits a change event. If the server is idle, these may never populate during a session. + +**Workaround for array state:** Use `unraid(action="system", subaction="array")` for a synchronous snapshot instead. + +--- + +## Rate Limit Exceeded + +**Limit:** 100 requests / 10 seconds + +**Symptoms:** HTTP 429 or rate limit error + +**Fix:** Space out requests. Avoid polling in tight loops. Use `live/` subscriptions for real-time data instead of polling `system/metrics` repeatedly. + +--- + +## Log Path Rejected + +**Error:** `Invalid log path` + +**Valid log path prefixes:** `/var/log/`, `/boot/logs/`, `/mnt/` + +Use `unraid(action="disk", subaction="log_files")` to list available logs before reading. + +--- + +## Container Logs Not Available + +Docker container stdout/stderr are **not accessible via the Unraid API**. SSH to the Unraid server and use `docker logs ` directly. 
diff --git a/tests/conftest.py b/tests/conftest.py index c29bb36..c1206c4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,6 +6,12 @@ from unittest.mock import AsyncMock, patch import pytest from fastmcp import FastMCP +from hypothesis import settings +from hypothesis.database import DirectoryBasedExampleDatabase + +# Configure hypothesis to use the .cache directory for its database +settings.register_profile("default", database=DirectoryBasedExampleDatabase(".cache/.hypothesis")) +settings.load_profile("default") @pytest.fixture diff --git a/tests/contract/test_response_contracts.py b/tests/contract/test_response_contracts.py index 654c302..dde8a13 100644 --- a/tests/contract/test_response_contracts.py +++ b/tests/contract/test_response_contracts.py @@ -70,7 +70,7 @@ class DockerMutationResult(BaseModel): """Shape returned by docker start/stop/pause/unpause mutations.""" success: bool - action: str + subaction: str container: Any = None @@ -287,48 +287,42 @@ class NotificationCreateResult(BaseModel): @pytest.fixture def _docker_mock() -> Generator[AsyncMock, None, None]: - with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as mock: + with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock: yield mock @pytest.fixture def _info_mock() -> Generator[AsyncMock, None, None]: - with patch("unraid_mcp.tools.info.make_graphql_request", new_callable=AsyncMock) as mock: + with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock: yield mock @pytest.fixture def _storage_mock() -> Generator[AsyncMock, None, None]: - with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as mock: + with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock: yield mock @pytest.fixture def _notifications_mock() -> Generator[AsyncMock, None, None]: - with patch( - "unraid_mcp.tools.notifications.make_graphql_request", 
new_callable=AsyncMock - ) as mock: + with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock: yield mock def _docker_tool(): - return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") def _info_tool(): - return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") def _storage_tool(): - return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage") + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") def _notifications_tool(): - return make_tool_fn( - "unraid_mcp.tools.notifications", - "register_notifications_tool", - "unraid_notifications", - ) + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") # --------------------------------------------------------------------------- @@ -341,7 +335,7 @@ class TestDockerListContract: async def test_list_result_has_containers_key(self, _docker_mock: AsyncMock) -> None: _docker_mock.return_value = {"docker": {"containers": []}} - result = await _docker_tool()(action="list") + result = await _docker_tool()(action="docker", subaction="list") DockerListResult(**result) async def test_list_containers_conform_to_shape(self, _docker_mock: AsyncMock) -> None: @@ -353,14 +347,14 @@ class TestDockerListContract: ] } } - result = await _docker_tool()(action="list") + result = await _docker_tool()(action="docker", subaction="list") validated = DockerListResult(**result) for container in validated.containers: DockerContainer(**container) async def test_list_empty_containers_is_valid(self, _docker_mock: AsyncMock) -> None: _docker_mock.return_value = {"docker": {"containers": []}} - result = await _docker_tool()(action="list") + result = await _docker_tool()(action="docker", subaction="list") validated = 
DockerListResult(**result) assert validated.containers == [] @@ -369,7 +363,7 @@ class TestDockerListContract: _docker_mock.return_value = { "docker": {"containers": [{"id": "abc123", "names": ["plex"], "state": "running"}]} } - result = await _docker_tool()(action="list") + result = await _docker_tool()(action="docker", subaction="list") container_raw = result["containers"][0] DockerContainer(**container_raw) @@ -378,7 +372,7 @@ class TestDockerListContract: _docker_mock.return_value = { "docker": {"containers": [{"id": "abc123", "state": "running"}]} } - result = await _docker_tool()(action="list") + result = await _docker_tool()(action="docker", subaction="list") with pytest.raises(ValidationError): DockerContainer(**result["containers"][0]) @@ -403,7 +397,7 @@ class TestDockerDetailsContract: ] } } - result = await _docker_tool()(action="details", container_id=cid) + result = await _docker_tool()(action="docker", subaction="details", container_id=cid) DockerContainerDetails(**result) async def test_details_has_required_fields(self, _docker_mock: AsyncMock) -> None: @@ -411,7 +405,7 @@ class TestDockerDetailsContract: _docker_mock.return_value = { "docker": {"containers": [{"id": cid, "names": ["sonarr"], "state": "exited"}]} } - result = await _docker_tool()(action="details", container_id=cid) + result = await _docker_tool()(action="docker", subaction="details", container_id=cid) assert "id" in result assert "names" in result assert "state" in result @@ -424,7 +418,7 @@ class TestDockerNetworksContract: _docker_mock.return_value = { "docker": {"networks": [{"id": "net:1", "name": "bridge", "driver": "bridge"}]} } - result = await _docker_tool()(action="networks") + result = await _docker_tool()(action="docker", subaction="networks") DockerNetworkListResult(**result) async def test_network_entries_conform_to_shape(self, _docker_mock: AsyncMock) -> None: @@ -436,13 +430,13 @@ class TestDockerNetworksContract: ] } } - result = await 
_docker_tool()(action="networks") + result = await _docker_tool()(action="docker", subaction="networks") for net in result["networks"]: DockerNetwork(**net) async def test_empty_networks_is_valid(self, _docker_mock: AsyncMock) -> None: _docker_mock.return_value = {"docker": {"networks": []}} - result = await _docker_tool()(action="networks") + result = await _docker_tool()(action="docker", subaction="networks") validated = DockerNetworkListResult(**result) assert validated.networks == [] @@ -456,10 +450,10 @@ class TestDockerMutationContract: {"docker": {"containers": [{"id": cid, "names": ["plex"]}]}}, {"docker": {"start": {"id": cid, "names": ["plex"], "state": "running"}}}, ] - result = await _docker_tool()(action="start", container_id=cid) + result = await _docker_tool()(action="docker", subaction="start", container_id=cid) validated = DockerMutationResult(**result) assert validated.success is True - assert validated.action == "start" + assert validated.subaction == "start" async def test_stop_mutation_result_shape(self, _docker_mock: AsyncMock) -> None: cid = "d" * 64 + ":local" @@ -467,10 +461,10 @@ class TestDockerMutationContract: {"docker": {"containers": [{"id": cid, "names": ["nginx"]}]}}, {"docker": {"stop": {"id": cid, "names": ["nginx"], "state": "exited"}}}, ] - result = await _docker_tool()(action="stop", container_id=cid) + result = await _docker_tool()(action="docker", subaction="stop", container_id=cid) validated = DockerMutationResult(**result) assert validated.success is True - assert validated.action == "stop" + assert validated.subaction == "stop" # --------------------------------------------------------------------------- @@ -501,7 +495,7 @@ class TestInfoOverviewContract: "memory": {"layout": []}, } } - result = await _info_tool()(action="overview") + result = await _info_tool()(action="system", subaction="overview") validated = InfoOverviewResult(**result) assert isinstance(validated.summary, dict) assert isinstance(validated.details, 
dict) @@ -521,7 +515,7 @@ class TestInfoOverviewContract: "memory": {"layout": []}, } } - result = await _info_tool()(action="overview") + result = await _info_tool()(action="system", subaction="overview") InfoOverviewSummary(**result["summary"]) assert result["summary"]["hostname"] == "myserver" @@ -538,7 +532,7 @@ class TestInfoOverviewContract: "memory": {"layout": []}, } _info_mock.return_value = {"info": raw_info} - result = await _info_tool()(action="overview") + result = await _info_tool()(action="system", subaction="overview") assert result["details"] == raw_info @@ -557,7 +551,7 @@ class TestInfoArrayContract: "boot": None, } } - result = await _info_tool()(action="array") + result = await _info_tool()(action="system", subaction="array") validated = InfoArrayResult(**result) assert isinstance(validated.summary, dict) assert isinstance(validated.details, dict) @@ -572,7 +566,7 @@ class TestInfoArrayContract: "caches": [], } } - result = await _info_tool()(action="array") + result = await _info_tool()(action="system", subaction="array") ArraySummary(**result["summary"]) async def test_array_health_overall_healthy(self, _info_mock: AsyncMock) -> None: @@ -585,7 +579,7 @@ class TestInfoArrayContract: "caches": [], } } - result = await _info_tool()(action="array") + result = await _info_tool()(action="system", subaction="array") assert result["summary"]["overall_health"] == "HEALTHY" async def test_array_health_critical_with_failed_disk(self, _info_mock: AsyncMock) -> None: @@ -598,7 +592,7 @@ class TestInfoArrayContract: "caches": [], } } - result = await _info_tool()(action="array") + result = await _info_tool()(action="system", subaction="array") assert result["summary"]["overall_health"] == "CRITICAL" @@ -619,7 +613,7 @@ class TestInfoMetricsContract: }, } } - result = await _info_tool()(action="metrics") + result = await _info_tool()(action="system", subaction="metrics") validated = InfoMetricsResult(**result) assert validated.cpu is not None assert 
validated.memory is not None @@ -628,7 +622,7 @@ class TestInfoMetricsContract: _info_mock.return_value = { "metrics": {"cpu": {"percentTotal": 75.3}, "memory": {"percentTotal": 60.0}} } - result = await _info_tool()(action="metrics") + result = await _info_tool()(action="system", subaction="metrics") cpu_pct = result["cpu"]["percentTotal"] assert 0.0 <= cpu_pct <= 100.0 @@ -643,14 +637,14 @@ class TestInfoServicesContract: {"name": "docker", "online": True, "version": "24.0"}, ] } - result = await _info_tool()(action="services") + result = await _info_tool()(action="system", subaction="services") validated = InfoServicesResult(**result) for svc in validated.services: ServiceEntry(**svc) async def test_services_empty_list_is_valid(self, _info_mock: AsyncMock) -> None: _info_mock.return_value = {"services": []} - result = await _info_tool()(action="services") + result = await _info_tool()(action="system", subaction="services") InfoServicesResult(**result) assert result["services"] == [] @@ -660,13 +654,13 @@ class TestInfoOnlineContract: async def test_online_true_shape(self, _info_mock: AsyncMock) -> None: _info_mock.return_value = {"online": True} - result = await _info_tool()(action="online") + result = await _info_tool()(action="system", subaction="online") validated = InfoOnlineResult(**result) assert validated.online is True async def test_online_false_shape(self, _info_mock: AsyncMock) -> None: _info_mock.return_value = {"online": False} - result = await _info_tool()(action="online") + result = await _info_tool()(action="system", subaction="online") validated = InfoOnlineResult(**result) assert validated.online is False @@ -687,7 +681,7 @@ class TestInfoNetworkContract: ], "vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"}, } - result = await _info_tool()(action="network") + result = await _info_tool()(action="system", subaction="network") validated = InfoNetworkResult(**result) assert isinstance(validated.accessUrls, list) @@ -696,7 
+690,7 @@ class TestInfoNetworkContract: "servers": [], "vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"}, } - result = await _info_tool()(action="network") + result = await _info_tool()(action="system", subaction="network") validated = InfoNetworkResult(**result) assert validated.accessUrls == [] @@ -716,21 +710,21 @@ class TestStorageSharesContract: {"id": "share:2", "name": "appdata", "free": 200000, "used": 50000, "size": 250000}, ] } - result = await _storage_tool()(action="shares") + result = await _storage_tool()(action="disk", subaction="shares") validated = StorageSharesResult(**result) for share in validated.shares: ShareEntry(**share) async def test_shares_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None: _storage_mock.return_value = {"shares": []} - result = await _storage_tool()(action="shares") + result = await _storage_tool()(action="disk", subaction="shares") StorageSharesResult(**result) assert result["shares"] == [] async def test_shares_missing_name_fails_contract(self, _storage_mock: AsyncMock) -> None: """A share without required 'name' must fail contract validation.""" _storage_mock.return_value = {"shares": [{"id": "share:1", "free": 100}]} - result = await _storage_tool()(action="shares") + result = await _storage_tool()(action="disk", subaction="shares") with pytest.raises(ValidationError): ShareEntry(**result["shares"][0]) @@ -745,14 +739,14 @@ class TestStorageDisksContract: {"id": "disk:2", "device": "sdb", "name": "Seagate_8TB"}, ] } - result = await _storage_tool()(action="disks") + result = await _storage_tool()(action="disk", subaction="disks") validated = StorageDisksResult(**result) for disk in validated.disks: DiskEntry(**disk) async def test_disks_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None: _storage_mock.return_value = {"disks": []} - result = await _storage_tool()(action="disks") + result = await _storage_tool()(action="disk", subaction="disks") StorageDisksResult(**result) 
assert result["disks"] == [] @@ -771,7 +765,7 @@ class TestStorageDiskDetailsContract: "temperature": 35, } } - result = await _storage_tool()(action="disk_details", disk_id="disk:1") + result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:1") validated = StorageDiskDetailsResult(**result) assert isinstance(validated.summary, dict) assert isinstance(validated.details, dict) @@ -787,7 +781,7 @@ class TestStorageDiskDetailsContract: "temperature": 40, } } - result = await _storage_tool()(action="disk_details", disk_id="disk:2") + result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:2") DiskDetailsSummary(**result["summary"]) async def test_disk_details_temperature_formatted(self, _storage_mock: AsyncMock) -> None: @@ -801,7 +795,7 @@ class TestStorageDiskDetailsContract: "temperature": 38, } } - result = await _storage_tool()(action="disk_details", disk_id="disk:3") + result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:3") assert "°C" in result["summary"]["temperature"] async def test_disk_details_no_temperature_shows_na(self, _storage_mock: AsyncMock) -> None: @@ -815,7 +809,7 @@ class TestStorageDiskDetailsContract: "temperature": None, } } - result = await _storage_tool()(action="disk_details", disk_id="disk:4") + result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:4") assert result["summary"]["temperature"] == "N/A" @@ -839,14 +833,14 @@ class TestStorageLogFilesContract: }, ] } - result = await _storage_tool()(action="log_files") + result = await _storage_tool()(action="disk", subaction="log_files") validated = StorageLogFilesResult(**result) for log_file in validated.log_files: LogFileEntry(**log_file) async def test_log_files_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None: _storage_mock.return_value = {"logFiles": []} - result = await _storage_tool()(action="log_files") + result = await _storage_tool()(action="disk", 
subaction="log_files") StorageLogFilesResult(**result) assert result["log_files"] == [] @@ -868,7 +862,7 @@ class TestNotificationsOverviewContract: } } } - result = await _notifications_tool()(action="overview") + result = await _notifications_tool()(action="notification", subaction="overview") validated = NotificationOverviewResult(**result) assert validated.unread is not None assert validated.archive is not None @@ -882,7 +876,7 @@ class TestNotificationsOverviewContract: } } } - result = await _notifications_tool()(action="overview") + result = await _notifications_tool()(action="notification", subaction="overview") NotificationCountBucket(**result["unread"]) NotificationCountBucket(**result["archive"]) @@ -895,7 +889,7 @@ class TestNotificationsOverviewContract: } } } - result = await _notifications_tool()(action="overview") + result = await _notifications_tool()(action="notification", subaction="overview") NotificationOverviewResult(**result) @@ -920,14 +914,14 @@ class TestNotificationsListContract: ] } } - result = await _notifications_tool()(action="list") + result = await _notifications_tool()(action="notification", subaction="list") validated = NotificationListResult(**result) for notif in validated.notifications: NotificationEntry(**notif) async def test_list_empty_notifications_valid(self, _notifications_mock: AsyncMock) -> None: _notifications_mock.return_value = {"notifications": {"list": []}} - result = await _notifications_tool()(action="list") + result = await _notifications_tool()(action="notification", subaction="list") NotificationListResult(**result) assert result["notifications"] == [] @@ -938,7 +932,7 @@ class TestNotificationsListContract: _notifications_mock.return_value = { "notifications": {"list": [{"title": "No ID here", "importance": "INFO"}]} } - result = await _notifications_tool()(action="list") + result = await _notifications_tool()(action="notification", subaction="list") with pytest.raises(ValidationError): 
NotificationEntry(**result["notifications"][0]) @@ -955,7 +949,8 @@ class TestNotificationsCreateContract: } } result = await _notifications_tool()( - action="create", + action="notification", + subaction="create", title="Test notification", subject="Test subject", description="This is a test", @@ -970,7 +965,8 @@ class TestNotificationsCreateContract: "createNotification": {"id": "notif:42", "title": "Alert!", "importance": "ALERT"} } result = await _notifications_tool()( - action="create", + action="notification", + subaction="create", title="Alert!", subject="Critical issue", description="Something went wrong", diff --git a/tests/http_layer/test_request_construction.py b/tests/http_layer/test_request_construction.py index c0f5196..acda9db 100644 --- a/tests/http_layer/test_request_construction.py +++ b/tests/http_layer/test_request_construction.py @@ -8,6 +8,7 @@ to verify the full request pipeline. """ import json +from collections.abc import Callable from typing import Any from unittest.mock import patch @@ -261,11 +262,11 @@ class TestGraphQLErrorHandling: class TestInfoToolRequests: - """Verify unraid_info tool constructs correct GraphQL queries.""" + """Verify unraid system tool constructs correct GraphQL queries.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_overview_sends_correct_query(self) -> None: @@ -281,7 +282,7 @@ class TestInfoToolRequests: ) ) tool = self._get_tool() - await tool(action="overview") + await tool(action="system", subaction="overview") body = _extract_request_body(route.calls.last.request) assert "GetSystemInfo" in body["query"] assert "info" in body["query"] @@ -292,7 +293,7 @@ class TestInfoToolRequests: return_value=_graphql_response({"array": {"state": "STARTED", "capacity": {}}}) ) tool = self._get_tool() - 
await tool(action="array") + await tool(action="system", subaction="array") body = _extract_request_body(route.calls.last.request) assert "GetArrayStatus" in body["query"] @@ -302,7 +303,7 @@ class TestInfoToolRequests: return_value=_graphql_response({"network": {"id": "n1", "accessUrls": []}}) ) tool = self._get_tool() - await tool(action="network") + await tool(action="system", subaction="network") body = _extract_request_body(route.calls.last.request) assert "GetNetworkInfo" in body["query"] @@ -314,7 +315,7 @@ class TestInfoToolRequests: ) ) tool = self._get_tool() - await tool(action="metrics") + await tool(action="system", subaction="metrics") body = _extract_request_body(route.calls.last.request) assert "GetMetrics" in body["query"] @@ -324,7 +325,7 @@ class TestInfoToolRequests: return_value=_graphql_response({"upsDeviceById": {"id": "ups1", "model": "APC"}}) ) tool = self._get_tool() - await tool(action="ups_device", device_id="ups1") + await tool(action="system", subaction="ups_device", device_id="ups1") body = _extract_request_body(route.calls.last.request) assert body["variables"] == {"id": "ups1"} assert "GetUpsDevice" in body["query"] @@ -333,7 +334,7 @@ class TestInfoToolRequests: async def test_online_sends_correct_query(self) -> None: route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() - await tool(action="online") + await tool(action="system", subaction="online") body = _extract_request_body(route.calls.last.request) assert "GetOnline" in body["query"] @@ -343,7 +344,7 @@ class TestInfoToolRequests: return_value=_graphql_response({"servers": [{"id": "s1", "name": "tower"}]}) ) tool = self._get_tool() - await tool(action="servers") + await tool(action="system", subaction="servers") body = _extract_request_body(route.calls.last.request) assert "GetServers" in body["query"] @@ -353,7 +354,7 @@ class TestInfoToolRequests: return_value=_graphql_response({"flash": {"id": "f1", "guid": "abc"}}) ) 
tool = self._get_tool() - await tool(action="flash") + await tool(action="system", subaction="flash") body = _extract_request_body(route.calls.last.request) assert "GetFlash" in body["query"] @@ -364,11 +365,11 @@ class TestInfoToolRequests: class TestDockerToolRequests: - """Verify unraid_docker tool constructs correct requests.""" + """Verify unraid docker tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_list_sends_correct_query(self) -> None: @@ -378,7 +379,7 @@ class TestDockerToolRequests: ) ) tool = self._get_tool() - await tool(action="list") + await tool(action="docker", subaction="list") body = _extract_request_body(route.calls.last.request) assert "ListDockerContainers" in body["query"] @@ -400,7 +401,7 @@ class TestDockerToolRequests: ) ) tool = self._get_tool() - await tool(action="start", container_id=container_id) + await tool(action="docker", subaction="start", container_id=container_id) body = _extract_request_body(route.calls.last.request) assert "StartContainer" in body["query"] assert body["variables"] == {"id": container_id} @@ -423,7 +424,7 @@ class TestDockerToolRequests: ) ) tool = self._get_tool() - await tool(action="stop", container_id=container_id) + await tool(action="docker", subaction="stop", container_id=container_id) body = _extract_request_body(route.calls.last.request) assert "StopContainer" in body["query"] assert body["variables"] == {"id": container_id} @@ -440,7 +441,7 @@ class TestDockerToolRequests: ) ) tool = self._get_tool() - await tool(action="networks") + await tool(action="docker", subaction="networks") body = _extract_request_body(route.calls.last.request) assert "GetDockerNetworks" in body["query"] @@ -484,9 +485,9 @@ class TestDockerToolRequests: 
respx.post(API_URL).mock(side_effect=side_effect) tool = self._get_tool() - result = await tool(action="restart", container_id=container_id) + result = await tool(action="docker", subaction="restart", container_id=container_id) assert result["success"] is True - assert result["action"] == "restart" + assert result["subaction"] == "restart" assert call_count == 2 @respx.mock @@ -499,7 +500,8 @@ class TestDockerToolRequests: nonlocal call_count body = json.loads(request.content.decode()) call_count += 1 - if "ResolveContainerID" in body["query"]: + if "skipCache" in body["query"]: + # Resolution query: docker { containers(skipCache: true) { id names } } return _graphql_response( {"docker": {"containers": [{"id": resolved_id, "names": ["plex"]}]}} ) @@ -520,7 +522,7 @@ class TestDockerToolRequests: respx.post(API_URL).mock(side_effect=side_effect) tool = self._get_tool() - result = await tool(action="start", container_id="plex") + result = await tool(action="docker", subaction="start", container_id="plex") assert call_count == 2 # resolve + start assert result["success"] is True @@ -531,11 +533,11 @@ class TestDockerToolRequests: class TestVMToolRequests: - """Verify unraid_vm tool constructs correct requests.""" + """Verify unraid vm tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_list_sends_correct_query(self) -> None: @@ -549,7 +551,7 @@ class TestVMToolRequests: ) ) tool = self._get_tool() - result = await tool(action="list") + result = await tool(action="vm", subaction="list") body = _extract_request_body(route.calls.last.request) assert "ListVMs" in body["query"] assert "vms" in result @@ -558,7 +560,7 @@ class TestVMToolRequests: async def test_start_sends_mutation_with_id(self) -> None: route = 
respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"start": True}})) tool = self._get_tool() - result = await tool(action="start", vm_id="vm-123") + result = await tool(action="vm", subaction="start", vm_id="vm-123") body = _extract_request_body(route.calls.last.request) assert "StartVM" in body["query"] assert body["variables"] == {"id": "vm-123"} @@ -568,7 +570,7 @@ class TestVMToolRequests: async def test_stop_sends_mutation_with_id(self) -> None: route = respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"stop": True}})) tool = self._get_tool() - await tool(action="stop", vm_id="vm-456") + await tool(action="vm", subaction="stop", vm_id="vm-456") body = _extract_request_body(route.calls.last.request) assert "StopVM" in body["query"] assert body["variables"] == {"id": "vm-456"} @@ -577,7 +579,7 @@ class TestVMToolRequests: async def test_force_stop_requires_confirm(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="not confirmed"): - await tool(action="force_stop", vm_id="vm-789") + await tool(action="vm", subaction="force_stop", vm_id="vm-789") @respx.mock async def test_force_stop_sends_mutation_when_confirmed(self) -> None: @@ -585,7 +587,7 @@ class TestVMToolRequests: return_value=_graphql_response({"vm": {"forceStop": True}}) ) tool = self._get_tool() - result = await tool(action="force_stop", vm_id="vm-789", confirm=True) + result = await tool(action="vm", subaction="force_stop", vm_id="vm-789", confirm=True) body = _extract_request_body(route.calls.last.request) assert "ForceStopVM" in body["query"] assert result["success"] is True @@ -594,7 +596,7 @@ class TestVMToolRequests: async def test_reset_requires_confirm(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="not confirmed"): - await tool(action="reset", vm_id="vm-abc") + await tool(action="vm", subaction="reset", vm_id="vm-abc") @respx.mock async def test_details_finds_vm_by_name(self) -> None: @@ -611,7 +613,7 @@ class 
TestVMToolRequests: ) ) tool = self._get_tool() - result = await tool(action="details", vm_id="ubuntu") + result = await tool(action="vm", subaction="details", vm_id="ubuntu") assert result["name"] == "ubuntu" @@ -621,11 +623,11 @@ class TestVMToolRequests: class TestArrayToolRequests: - """Verify unraid_array tool constructs correct requests.""" + """Verify unraid array tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_parity_status_sends_correct_query(self) -> None: @@ -643,7 +645,7 @@ class TestArrayToolRequests: ) ) tool = self._get_tool() - result = await tool(action="parity_status") + result = await tool(action="array", subaction="parity_status") body = _extract_request_body(route.calls.last.request) assert "GetParityStatus" in body["query"] assert result["success"] is True @@ -654,7 +656,7 @@ class TestArrayToolRequests: return_value=_graphql_response({"parityCheck": {"start": True}}) ) tool = self._get_tool() - result = await tool(action="parity_start", correct=False) + result = await tool(action="array", subaction="parity_start", correct=False) body = _extract_request_body(route.calls.last.request) assert "StartParityCheck" in body["query"] assert body["variables"] == {"correct": False} @@ -666,7 +668,7 @@ class TestArrayToolRequests: return_value=_graphql_response({"parityCheck": {"start": True}}) ) tool = self._get_tool() - await tool(action="parity_start", correct=True) + await tool(action="array", subaction="parity_start", correct=True) body = _extract_request_body(route.calls.last.request) assert body["variables"] == {"correct": True} @@ -676,7 +678,7 @@ class TestArrayToolRequests: return_value=_graphql_response({"parityCheck": {"pause": True}}) ) tool = self._get_tool() - await 
tool(action="parity_pause") + await tool(action="array", subaction="parity_pause") body = _extract_request_body(route.calls.last.request) assert "PauseParityCheck" in body["query"] @@ -686,7 +688,7 @@ class TestArrayToolRequests: return_value=_graphql_response({"parityCheck": {"cancel": True}}) ) tool = self._get_tool() - await tool(action="parity_cancel") + await tool(action="array", subaction="parity_cancel") body = _extract_request_body(route.calls.last.request) assert "CancelParityCheck" in body["query"] @@ -697,11 +699,11 @@ class TestArrayToolRequests: class TestStorageToolRequests: - """Verify unraid_storage tool constructs correct requests.""" + """Verify unraid disk tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_shares_sends_correct_query(self) -> None: @@ -709,7 +711,7 @@ class TestStorageToolRequests: return_value=_graphql_response({"shares": [{"id": "s1", "name": "appdata"}]}) ) tool = self._get_tool() - result = await tool(action="shares") + result = await tool(action="disk", subaction="shares") body = _extract_request_body(route.calls.last.request) assert "GetSharesInfo" in body["query"] assert "shares" in result @@ -722,7 +724,7 @@ class TestStorageToolRequests: ) ) tool = self._get_tool() - await tool(action="disks") + await tool(action="disk", subaction="disks") body = _extract_request_body(route.calls.last.request) assert "ListPhysicalDisks" in body["query"] @@ -743,7 +745,7 @@ class TestStorageToolRequests: ) ) tool = self._get_tool() - await tool(action="disk_details", disk_id="d1") + await tool(action="disk", subaction="disk_details", disk_id="d1") body = _extract_request_body(route.calls.last.request) assert "GetDiskDetails" in body["query"] assert body["variables"] == {"id": "d1"} 
@@ -756,7 +758,7 @@ class TestStorageToolRequests: ) ) tool = self._get_tool() - result = await tool(action="log_files") + result = await tool(action="disk", subaction="log_files") body = _extract_request_body(route.calls.last.request) assert "ListLogFiles" in body["query"] assert "log_files" in result @@ -776,7 +778,7 @@ class TestStorageToolRequests: ) ) tool = self._get_tool() - await tool(action="logs", log_path="/var/log/syslog", tail_lines=50) + await tool(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=50) body = _extract_request_body(route.calls.last.request) assert "GetLogContent" in body["query"] assert body["variables"]["path"] == "/var/log/syslog" @@ -786,7 +788,7 @@ class TestStorageToolRequests: async def test_logs_rejects_path_traversal(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="log_path must start with"): - await tool(action="logs", log_path="/etc/shadow") + await tool(action="disk", subaction="logs", log_path="/etc/shadow") # =========================================================================== @@ -795,15 +797,11 @@ class TestStorageToolRequests: class TestNotificationsToolRequests: - """Verify unraid_notifications tool constructs correct requests.""" + """Verify unraid notification tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn( - "unraid_mcp.tools.notifications", - "register_notifications_tool", - "unraid_notifications", - ) + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_overview_sends_correct_query(self) -> None: @@ -819,7 +817,7 @@ class TestNotificationsToolRequests: ) ) tool = self._get_tool() - await tool(action="overview") + await tool(action="notification", subaction="overview") body = _extract_request_body(route.calls.last.request) assert "GetNotificationsOverview" in body["query"] @@ -829,7 +827,14 @@ class 
TestNotificationsToolRequests: return_value=_graphql_response({"notifications": {"list": []}}) ) tool = self._get_tool() - await tool(action="list", list_type="ARCHIVE", importance="WARNING", offset=5, limit=10) + await tool( + action="notification", + subaction="list", + list_type="ARCHIVE", + importance="WARNING", + offset=5, + limit=10, + ) body = _extract_request_body(route.calls.last.request) assert "ListNotifications" in body["query"] filt = body["variables"]["filter"] @@ -853,7 +858,8 @@ class TestNotificationsToolRequests: ) tool = self._get_tool() await tool( - action="create", + action="notification", + subaction="create", title="Test", subject="Sub", description="Desc", @@ -872,7 +878,7 @@ class TestNotificationsToolRequests: return_value=_graphql_response({"archiveNotification": {"id": "notif-1"}}) ) tool = self._get_tool() - await tool(action="archive", notification_id="notif-1") + await tool(action="notification", subaction="archive", notification_id="notif-1") body = _extract_request_body(route.calls.last.request) assert "ArchiveNotification" in body["query"] assert body["variables"] == {"id": "notif-1"} @@ -881,7 +887,12 @@ class TestNotificationsToolRequests: async def test_delete_requires_confirm(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="not confirmed"): - await tool(action="delete", notification_id="n1", notification_type="UNREAD") + await tool( + action="notification", + subaction="delete", + notification_id="n1", + notification_type="UNREAD", + ) @respx.mock async def test_delete_sends_id_and_type(self) -> None: @@ -890,7 +901,8 @@ class TestNotificationsToolRequests: ) tool = self._get_tool() await tool( - action="delete", + action="notification", + subaction="delete", notification_id="n1", notification_type="unread", confirm=True, @@ -906,7 +918,7 @@ class TestNotificationsToolRequests: return_value=_graphql_response({"archiveAll": {"archive": {"total": 1}}}) ) tool = self._get_tool() - await 
tool(action="archive_all", importance="warning") + await tool(action="notification", subaction="archive_all", importance="warning") body = _extract_request_body(route.calls.last.request) assert "ArchiveAllNotifications" in body["query"] assert body["variables"]["importance"] == "WARNING" @@ -918,11 +930,11 @@ class TestNotificationsToolRequests: class TestRCloneToolRequests: - """Verify unraid_rclone tool constructs correct requests.""" + """Verify unraid rclone tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_list_remotes_sends_correct_query(self) -> None: @@ -932,7 +944,7 @@ class TestRCloneToolRequests: ) ) tool = self._get_tool() - result = await tool(action="list_remotes") + result = await tool(action="rclone", subaction="list_remotes") body = _extract_request_body(route.calls.last.request) assert "ListRCloneRemotes" in body["query"] assert "remotes" in result @@ -953,7 +965,7 @@ class TestRCloneToolRequests: ) ) tool = self._get_tool() - await tool(action="config_form", provider_type="s3") + await tool(action="rclone", subaction="config_form", provider_type="s3") body = _extract_request_body(route.calls.last.request) assert "GetRCloneConfigForm" in body["query"] assert body["variables"]["formOptions"]["providerType"] == "s3" @@ -975,7 +987,8 @@ class TestRCloneToolRequests: ) tool = self._get_tool() await tool( - action="create_remote", + action="rclone", + subaction="create_remote", name="my-s3", provider_type="s3", config_data={"bucket": "my-bucket"}, @@ -991,7 +1004,7 @@ class TestRCloneToolRequests: async def test_delete_remote_requires_confirm(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="not confirmed"): - await tool(action="delete_remote", name="old-remote") + await 
tool(action="rclone", subaction="delete_remote", name="old-remote") @respx.mock async def test_delete_remote_sends_name_when_confirmed(self) -> None: @@ -999,7 +1012,9 @@ class TestRCloneToolRequests: return_value=_graphql_response({"rclone": {"deleteRCloneRemote": True}}) ) tool = self._get_tool() - result = await tool(action="delete_remote", name="old-remote", confirm=True) + result = await tool( + action="rclone", subaction="delete_remote", name="old-remote", confirm=True + ) body = _extract_request_body(route.calls.last.request) assert "DeleteRCloneRemote" in body["query"] assert body["variables"]["input"]["name"] == "old-remote" @@ -1012,11 +1027,11 @@ class TestRCloneToolRequests: class TestUsersToolRequests: - """Verify unraid_users tool constructs correct requests.""" + """Verify unraid user tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_me_sends_correct_query(self) -> None: @@ -1033,7 +1048,7 @@ class TestUsersToolRequests: ) ) tool = self._get_tool() - result = await tool(action="me") + result = await tool(action="user", subaction="me") body = _extract_request_body(route.calls.last.request) assert "GetMe" in body["query"] assert result["name"] == "admin" @@ -1045,11 +1060,11 @@ class TestUsersToolRequests: class TestKeysToolRequests: - """Verify unraid_keys tool constructs correct requests.""" + """Verify unraid key tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_list_sends_correct_query(self) -> None: @@ -1057,7 +1072,7 @@ class 
TestKeysToolRequests: return_value=_graphql_response({"apiKeys": [{"id": "k1", "name": "my-key"}]}) ) tool = self._get_tool() - result = await tool(action="list") + result = await tool(action="key", subaction="list") body = _extract_request_body(route.calls.last.request) assert "ListApiKeys" in body["query"] assert "keys" in result @@ -1070,7 +1085,7 @@ class TestKeysToolRequests: ) ) tool = self._get_tool() - await tool(action="get", key_id="k1") + await tool(action="key", subaction="get", key_id="k1") body = _extract_request_body(route.calls.last.request) assert "GetApiKey" in body["query"] assert body["variables"] == {"id": "k1"} @@ -1092,7 +1107,7 @@ class TestKeysToolRequests: ) ) tool = self._get_tool() - result = await tool(action="create", name="new-key", roles=["read"]) + result = await tool(action="key", subaction="create", name="new-key", roles=["read"]) body = _extract_request_body(route.calls.last.request) assert "CreateApiKey" in body["query"] inp = body["variables"]["input"] @@ -1108,7 +1123,7 @@ class TestKeysToolRequests: ) ) tool = self._get_tool() - await tool(action="update", key_id="k1", name="renamed") + await tool(action="key", subaction="update", key_id="k1", name="renamed") body = _extract_request_body(route.calls.last.request) assert "UpdateApiKey" in body["query"] inp = body["variables"]["input"] @@ -1119,7 +1134,7 @@ class TestKeysToolRequests: async def test_delete_requires_confirm(self) -> None: tool = self._get_tool() with pytest.raises(ToolError, match="not confirmed"): - await tool(action="delete", key_id="k1") + await tool(action="key", subaction="delete", key_id="k1") @respx.mock async def test_delete_sends_ids_when_confirmed(self) -> None: @@ -1127,7 +1142,7 @@ class TestKeysToolRequests: return_value=_graphql_response({"apiKey": {"delete": True}}) ) tool = self._get_tool() - result = await tool(action="delete", key_id="k1", confirm=True) + result = await tool(action="key", subaction="delete", key_id="k1", confirm=True) body = 
_extract_request_body(route.calls.last.request) assert "DeleteApiKey" in body["query"] assert body["variables"]["input"]["ids"] == ["k1"] @@ -1140,17 +1155,17 @@ class TestKeysToolRequests: class TestHealthToolRequests: - """Verify unraid_health tool constructs correct requests.""" + """Verify unraid health tool constructs correct requests.""" @staticmethod - def _get_tool(): - return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health") + def _get_tool() -> Callable[..., Any]: + return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") @respx.mock async def test_test_connection_sends_online_query(self) -> None: route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() - result = await tool(action="test_connection") + result = await tool(action="health", subaction="test_connection") body = _extract_request_body(route.calls.last.request) assert "online" in body["query"] assert result["status"] == "connected" @@ -1178,7 +1193,7 @@ class TestHealthToolRequests: ) ) tool = self._get_tool() - result = await tool(action="check") + result = await tool(action="health", subaction="check") body = _extract_request_body(route.calls.last.request) assert "ComprehensiveHealthCheck" in body["query"] assert result["status"] == "healthy" @@ -1188,7 +1203,7 @@ class TestHealthToolRequests: async def test_test_connection_measures_latency(self) -> None: respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) tool = self._get_tool() - result = await tool(action="test_connection") + result = await tool(action="health", subaction="test_connection") assert "latency_ms" in result assert isinstance(result["latency_ms"], float) @@ -1212,7 +1227,7 @@ class TestHealthToolRequests: ) ) tool = self._get_tool() - result = await tool(action="check") + result = await tool(action="health", subaction="check") assert result["status"] == "warning" assert any("alert" in issue for issue in 
result.get("issues", [])) @@ -1249,17 +1264,17 @@ class TestCrossCuttingConcerns: async def test_tool_error_from_http_layer_propagates(self) -> None: """When an HTTP error occurs, the ToolError bubbles up through the tool.""" respx.post(API_URL).mock(return_value=httpx.Response(500, text="Server Error")) - tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") with pytest.raises(ToolError, match="Unraid API returned HTTP 500"): - await tool(action="online") + await tool(action="system", subaction="online") @respx.mock async def test_network_error_propagates_through_tool(self) -> None: """When a network error occurs, the ToolError bubbles up through the tool.""" respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused")) - tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") with pytest.raises(ToolError, match="Network error connecting to Unraid API"): - await tool(action="online") + await tool(action="system", subaction="online") @respx.mock async def test_graphql_error_propagates_through_tool(self) -> None: @@ -1267,6 +1282,6 @@ class TestCrossCuttingConcerns: respx.post(API_URL).mock( return_value=_graphql_response(errors=[{"message": "Permission denied"}]) ) - tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid") with pytest.raises(ToolError, match="Permission denied"): - await tool(action="online") + await tool(action="system", subaction="online") diff --git a/tests/integration/test_subscriptions.py b/tests/integration/test_subscriptions.py index a5aad0b..debc2ff 100644 --- a/tests/integration/test_subscriptions.py +++ b/tests/integration/test_subscriptions.py @@ -816,6 +816,15 @@ class TestAutoStart: async def 
test_auto_start_only_starts_marked_subscriptions(self) -> None: mgr = SubscriptionManager() + # Clear default SNAPSHOT_ACTIONS configs; add one with auto_start=False + # to verify that unmarked subscriptions are never started. + mgr.subscription_configs.clear() + mgr.subscription_configs["no_auto_sub"] = { + "query": "subscription { test }", + "resource": "unraid://test", + "description": "Unmarked sub", + "auto_start": False, + } with patch.object(mgr, "start_subscription", new_callable=AsyncMock) as mock_start: await mgr.auto_start_all_subscriptions() mock_start.assert_not_called() @@ -837,6 +846,7 @@ class TestAutoStart: async def test_auto_start_calls_start_for_marked(self) -> None: mgr = SubscriptionManager() + mgr.subscription_configs.clear() mgr.subscription_configs["auto_sub"] = { "query": "subscription { auto }", "resource": "unraid://auto", diff --git a/tests/mcporter/README.md b/tests/mcporter/README.md index 2b4150e..edb7f8b 100644 --- a/tests/mcporter/README.md +++ b/tests/mcporter/README.md @@ -4,17 +4,7 @@ Live integration smoke-tests for the unraid-mcp server, exercising real API call --- -## Two Scripts, Two Transports - -| | `test-tools.sh` | `test-actions.sh` | -|-|-----------------|-------------------| -| **Transport** | stdio | HTTP | -| **Server required** | No — launched ad-hoc per call | Yes — must be running at `$MCP_URL` | -| **Flags** | `--timeout-ms N`, `--parallel`, `--verbose` | positional `[MCP_URL]` | -| **Coverage** | 10 tools (read-only actions only) | 11 tools (all non-destructive actions) | -| **Use case** | CI / offline local check | Live server smoke-test | - -### `test-tools.sh` — stdio, no running server needed +## `test-tools.sh` — stdio, no running server needed ```bash ./tests/mcporter/test-tools.sh # sequential, 25s timeout @@ -25,19 +15,9 @@ Live integration smoke-tests for the unraid-mcp server, exercising real API call Launches `uv run unraid-mcp-server` in stdio mode for each tool call. 
Requires `mcporter`, `uv`, and `python3` in `PATH`. Good for CI pipelines — no persistent server process needed. -### `test-actions.sh` — HTTP, requires a live server - -```bash -./tests/mcporter/test-actions.sh # default: http://localhost:6970/mcp -./tests/mcporter/test-actions.sh http://10.1.0.2:6970/mcp # explicit URL -UNRAID_MCP_URL=http://10.1.0.2:6970/mcp ./tests/mcporter/test-actions.sh -``` - -Connects to an already-running streamable-http server. Covers all read-only actions across 10 tools (`unraid_settings` is all-mutations and skipped; all destructive mutations are explicitly skipped). - --- -## What `test-actions.sh` Tests +## What `test-tools.sh` Tests ### Phase 1 — Param-free reads @@ -137,15 +117,10 @@ curl -LsSf https://astral.sh/uv/install.sh | sh # python3 — used for inline JSON extraction python3 --version # 3.12+ - -# Running server (for test-actions.sh only) -docker compose up -d -# or -uv run unraid-mcp-server ``` --- ## Cleanup -`test-actions.sh` connects to an existing server and leaves it running; it creates no temporary files. `test-tools.sh` spawns stdio server subprocesses per call — they exit when mcporter finishes each invocation — and may write a timestamped log file under `${TMPDIR:-/tmp}`. Neither script leaves background processes. +`test-tools.sh` spawns stdio server subprocesses per call — they exit when mcporter finishes each invocation — and may write a timestamped log file under `${TMPDIR:-/tmp}`. It does not leave background processes. 
diff --git a/tests/mcporter/test-actions.sh b/tests/mcporter/test-actions.sh deleted file mode 100755 index 26a287d..0000000 --- a/tests/mcporter/test-actions.sh +++ /dev/null @@ -1,407 +0,0 @@ -#!/usr/bin/env bash -# test-actions.sh — Test all non-destructive Unraid MCP actions via mcporter -# -# Usage: -# ./scripts/test-actions.sh [MCP_URL] -# -# Default MCP_URL: http://localhost:6970/mcp -# Skips: destructive (confirm=True required), state-changing mutations, -# and actions requiring IDs not yet discovered. -# -# Phase 1: param-free reads -# Phase 2: ID-discovered reads (container, network, disk, vm, key, log) - -set -euo pipefail - -MCP_URL="${1:-${UNRAID_MCP_URL:-http://localhost:6970/mcp}}" - -# ── colours ────────────────────────────────────────────────────────────────── -RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m' -CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m' - -PASS=0; FAIL=0; SKIP=0 -declare -a FAILED_TESTS=() - -# ── helpers ─────────────────────────────────────────────────────────────────── - -mcall() { - # mcall - local tool="$1" args="$2" - mcporter call \ - --http-url "$MCP_URL" \ - --allow-http \ - --tool "$tool" \ - --args "$args" \ - --output json \ - 2>&1 -} - -_check_output() { - # Returns 0 if output looks like a successful JSON response, 1 otherwise. - local output="$1" exit_code="$2" - [[ $exit_code -ne 0 ]] && return 1 - echo "$output" | python3 -c " -import json, sys -try: - d = json.load(sys.stdin) - if isinstance(d, dict) and (d.get('isError') or d.get('error') or 'ToolError' in str(d)): - sys.exit(1) -except Exception: - pass -sys.exit(0) -" 2>/dev/null -} - -run_test() { - # Print result; do NOT echo the JSON body (kept quiet for readability). - local label="$1" tool="$2" args="$3" - printf " %-60s" "$label" - local output exit_code=0 - output=$(mcall "$tool" "$args" 2>&1) || exit_code=$? 
- if _check_output "$output" "$exit_code"; then - echo -e "${GREEN}PASS${NC}" - ((PASS++)) || true - else - echo -e "${RED}FAIL${NC}" - ((FAIL++)) || true - FAILED_TESTS+=("$label") - # Show first 3 lines of error detail, indented - echo "$output" | head -3 | sed 's/^/ /' - fi -} - -run_test_capture() { - # Like run_test but echoes raw JSON to stdout for ID extraction by caller. - # Status lines go to stderr so the caller's $() captures only clean JSON. - local label="$1" tool="$2" args="$3" - local output exit_code=0 - printf " %-60s" "$label" >&2 - output=$(mcall "$tool" "$args" 2>&1) || exit_code=$? - if _check_output "$output" "$exit_code"; then - echo -e "${GREEN}PASS${NC}" >&2 - ((PASS++)) || true - else - echo -e "${RED}FAIL${NC}" >&2 - ((FAIL++)) || true - FAILED_TESTS+=("$label") - echo "$output" | head -3 | sed 's/^/ /' >&2 - fi - echo "$output" # pure JSON → captured by caller's $() -} - -extract_id() { - # Extract an ID from JSON output using a Python snippet. - # Usage: ID=$(extract_id "$JSON_OUTPUT" "$LABEL" 'python expression') - # If JSON parsing fails (malformed mcporter output), record a FAIL. - # If parsing succeeds but finds no items, return empty (caller skips). - local json_input="$1" label="$2" py_code="$3" - local result="" py_exit=0 parse_err="" - # Capture stdout (the extracted ID) and stderr (any parse errors) separately. - # A temp file is needed because $() can only capture one stream. - local errfile - errfile=$(mktemp) - result=$(echo "$json_input" | python3 -c "$py_code" 2>"$errfile") || py_exit=$? 
- parse_err=$(<"$errfile") - rm -f "$errfile" - if [[ $py_exit -ne 0 ]]; then - printf " %-60s${RED}FAIL${NC} (JSON parse error)\n" "$label" >&2 - [[ -n "$parse_err" ]] && echo "$parse_err" | head -2 | sed 's/^/ /' >&2 - ((FAIL++)) || true - FAILED_TESTS+=("$label (JSON parse)") - echo "" - return 1 - fi - echo "$result" -} - -skip_test() { - local label="$1" reason="$2" - printf " %-60s${YELLOW}SKIP${NC} (%s)\n" "$label" "$reason" - ((SKIP++)) || true -} - -section() { - echo "" - echo -e "${CYAN}${BOLD}━━━ $1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" -} - -# ── connectivity check ──────────────────────────────────────────────────────── - -echo "" -echo -e "${BOLD}Unraid MCP Non-Destructive Action Test Suite${NC}" -echo -e "Server: ${CYAN}$MCP_URL${NC}" -echo "" -printf "Checking connectivity... " -# Use -s (silent) without -f: a 4xx/406 means the MCP server is up and -# responding correctly to a plain GET — only "connection refused" is fatal. -# Capture curl's exit code directly — don't mask failures with a fallback. -HTTP_CODE="" -curl_exit=0 -HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 "$MCP_URL" 2>/dev/null) || curl_exit=$? 
-if [[ $curl_exit -ne 0 ]]; then - echo -e "${RED}UNREACHABLE${NC} (curl exit code: $curl_exit)" - echo "Start the server first: docker compose up -d OR uv run unraid-mcp-server" - exit 1 -fi -echo -e "${GREEN}OK${NC} (HTTP $HTTP_CODE)" - -# ═══════════════════════════════════════════════════════════════════════════════ -# PHASE 1 — Param-free read actions -# ═══════════════════════════════════════════════════════════════════════════════ - -section "unraid_info (19 query actions)" -run_test "info: overview" unraid_info '{"action":"overview"}' -run_test "info: array" unraid_info '{"action":"array"}' -run_test "info: network" unraid_info '{"action":"network"}' -run_test "info: registration" unraid_info '{"action":"registration"}' -run_test "info: connect" unraid_info '{"action":"connect"}' -run_test "info: variables" unraid_info '{"action":"variables"}' -run_test "info: metrics" unraid_info '{"action":"metrics"}' -run_test "info: services" unraid_info '{"action":"services"}' -run_test "info: display" unraid_info '{"action":"display"}' -run_test "info: config" unraid_info '{"action":"config"}' -run_test "info: online" unraid_info '{"action":"online"}' -run_test "info: owner" unraid_info '{"action":"owner"}' -run_test "info: settings" unraid_info '{"action":"settings"}' -run_test "info: server" unraid_info '{"action":"server"}' -run_test "info: servers" unraid_info '{"action":"servers"}' -run_test "info: flash" unraid_info '{"action":"flash"}' -run_test "info: ups_devices" unraid_info '{"action":"ups_devices"}' -run_test "info: ups_device" unraid_info '{"action":"ups_device"}' -run_test "info: ups_config" unraid_info '{"action":"ups_config"}' -skip_test "info: update_server" "mutation — state-changing" -skip_test "info: update_ssh" "mutation — state-changing" - -section "unraid_array" -run_test "array: parity_status" unraid_array '{"action":"parity_status"}' -skip_test "array: parity_start" "mutation — starts parity check" -skip_test "array: parity_pause" "mutation — 
pauses parity check" -skip_test "array: parity_resume" "mutation — resumes parity check" -skip_test "array: parity_cancel" "mutation — cancels parity check" - -section "unraid_storage (param-free reads)" -STORAGE_DISKS=$(run_test_capture "storage: disks" unraid_storage '{"action":"disks"}') -run_test "storage: shares" unraid_storage '{"action":"shares"}' -run_test "storage: unassigned" unraid_storage '{"action":"unassigned"}' -LOG_FILES=$(run_test_capture "storage: log_files" unraid_storage '{"action":"log_files"}') -skip_test "storage: flash_backup" "destructive (confirm=True required)" - -section "unraid_docker (param-free reads)" -DOCKER_LIST=$(run_test_capture "docker: list" unraid_docker '{"action":"list"}') -DOCKER_NETS=$(run_test_capture "docker: networks" unraid_docker '{"action":"networks"}') -run_test "docker: port_conflicts" unraid_docker '{"action":"port_conflicts"}' -run_test "docker: check_updates" unraid_docker '{"action":"check_updates"}' -run_test "docker: sync_templates" unraid_docker '{"action":"sync_templates"}' -run_test "docker: refresh_digests" unraid_docker '{"action":"refresh_digests"}' -skip_test "docker: start" "mutation — changes container state" -skip_test "docker: stop" "mutation — changes container state" -skip_test "docker: restart" "mutation — changes container state" -skip_test "docker: pause" "mutation — changes container state" -skip_test "docker: unpause" "mutation — changes container state" -skip_test "docker: update" "mutation — updates container image" -skip_test "docker: remove" "destructive (confirm=True required)" -skip_test "docker: update_all" "destructive (confirm=True required)" -skip_test "docker: create_folder" "mutation — changes organizer state" -skip_test "docker: set_folder_children" "mutation — changes organizer state" -skip_test "docker: delete_entries" "destructive (confirm=True required)" -skip_test "docker: move_to_folder" "mutation — changes organizer state" -skip_test "docker: move_to_position" "mutation — 
changes organizer state" -skip_test "docker: rename_folder" "mutation — changes organizer state" -skip_test "docker: create_folder_with_items" "mutation — changes organizer state" -skip_test "docker: update_view_prefs" "mutation — changes organizer state" -skip_test "docker: reset_template_mappings" "destructive (confirm=True required)" - -section "unraid_vm (param-free reads)" -VM_LIST=$(run_test_capture "vm: list" unraid_vm '{"action":"list"}') -skip_test "vm: start" "mutation — changes VM state" -skip_test "vm: stop" "mutation — changes VM state" -skip_test "vm: pause" "mutation — changes VM state" -skip_test "vm: resume" "mutation — changes VM state" -skip_test "vm: reboot" "mutation — changes VM state" -skip_test "vm: force_stop" "destructive (confirm=True required)" -skip_test "vm: reset" "destructive (confirm=True required)" - -section "unraid_notifications" -run_test "notifications: overview" unraid_notifications '{"action":"overview"}' -run_test "notifications: list" unraid_notifications '{"action":"list"}' -run_test "notifications: warnings" unraid_notifications '{"action":"warnings"}' -run_test "notifications: recalculate" unraid_notifications '{"action":"recalculate"}' -skip_test "notifications: create" "mutation — creates notification" -skip_test "notifications: create_unique" "mutation — creates notification" -skip_test "notifications: archive" "mutation — changes notification state" -skip_test "notifications: unread" "mutation — changes notification state" -skip_test "notifications: archive_all" "mutation — changes notification state" -skip_test "notifications: archive_many" "mutation — changes notification state" -skip_test "notifications: unarchive_many" "mutation — changes notification state" -skip_test "notifications: unarchive_all" "mutation — changes notification state" -skip_test "notifications: delete" "destructive (confirm=True required)" -skip_test "notifications: delete_archived" "destructive (confirm=True required)" - -section 
"unraid_rclone" -run_test "rclone: list_remotes" unraid_rclone '{"action":"list_remotes"}' -run_test "rclone: config_form" unraid_rclone '{"action":"config_form"}' -skip_test "rclone: create_remote" "mutation — creates remote" -skip_test "rclone: delete_remote" "destructive (confirm=True required)" - -section "unraid_users" -run_test "users: me" unraid_users '{"action":"me"}' - -section "unraid_keys" -KEYS_LIST=$(run_test_capture "keys: list" unraid_keys '{"action":"list"}') -skip_test "keys: create" "mutation — creates API key" -skip_test "keys: update" "mutation — modifies API key" -skip_test "keys: delete" "destructive (confirm=True required)" - -section "unraid_health" -run_test "health: check" unraid_health '{"action":"check"}' -run_test "health: test_connection" unraid_health '{"action":"test_connection"}' -run_test "health: diagnose" unraid_health '{"action":"diagnose"}' - -section "unraid_settings (all mutations — skipped)" -skip_test "settings: update" "mutation — modifies settings" -skip_test "settings: update_temperature" "mutation — modifies settings" -skip_test "settings: update_time" "mutation — modifies settings" -skip_test "settings: configure_ups" "destructive (confirm=True required)" -skip_test "settings: update_api" "mutation — modifies settings" -skip_test "settings: connect_sign_in" "mutation — authentication action" -skip_test "settings: connect_sign_out" "mutation — authentication action" -skip_test "settings: setup_remote_access" "destructive (confirm=True required)" -skip_test "settings: enable_dynamic_remote_access" "destructive (confirm=True required)" - -# ═══════════════════════════════════════════════════════════════════════════════ -# PHASE 2 — ID-discovered read actions -# ═══════════════════════════════════════════════════════════════════════════════ - -section "Phase 2: ID-discovered reads" - -# ── docker container ID ─────────────────────────────────────────────────────── -CONTAINER_ID=$(extract_id "$DOCKER_LIST" "docker: extract 
container ID" " -import json, sys -d = json.load(sys.stdin) -containers = d.get('containers') or d.get('data', {}).get('containers') or [] -if isinstance(containers, list) and containers: - c = containers[0] - cid = c.get('id') or c.get('names', [''])[0].lstrip('/') - if cid: - print(cid) -") - -if [[ -n "$CONTAINER_ID" ]]; then - run_test "docker: details (id=$CONTAINER_ID)" \ - unraid_docker "{\"action\":\"details\",\"container_id\":\"$CONTAINER_ID\"}" - run_test "docker: logs (id=$CONTAINER_ID)" \ - unraid_docker "{\"action\":\"logs\",\"container_id\":\"$CONTAINER_ID\",\"tail_lines\":20}" -else - skip_test "docker: details" "no containers found to discover ID" - skip_test "docker: logs" "no containers found to discover ID" -fi - -# ── docker network ID ───────────────────────────────────────────────────────── -NETWORK_ID=$(extract_id "$DOCKER_NETS" "docker: extract network ID" " -import json, sys -d = json.load(sys.stdin) -nets = d.get('networks') or d.get('data', {}).get('networks') or [] -if isinstance(nets, list) and nets: - nid = nets[0].get('id') or nets[0].get('Id') - if nid: - print(nid) -") - -if [[ -n "$NETWORK_ID" ]]; then - run_test "docker: network_details (id=$NETWORK_ID)" \ - unraid_docker "{\"action\":\"network_details\",\"network_id\":\"$NETWORK_ID\"}" -else - skip_test "docker: network_details" "no networks found to discover ID" -fi - -# ── disk ID ─────────────────────────────────────────────────────────────────── -DISK_ID=$(extract_id "$STORAGE_DISKS" "storage: extract disk ID" " -import json, sys -d = json.load(sys.stdin) -disks = d.get('disks') or d.get('data', {}).get('disks') or [] -if isinstance(disks, list) and disks: - did = disks[0].get('id') or disks[0].get('device') - if did: - print(did) -") - -if [[ -n "$DISK_ID" ]]; then - run_test "storage: disk_details (id=$DISK_ID)" \ - unraid_storage "{\"action\":\"disk_details\",\"disk_id\":\"$DISK_ID\"}" -else - skip_test "storage: disk_details" "no disks found to discover ID" -fi - -# ── 
log path ────────────────────────────────────────────────────────────────── -LOG_PATH=$(extract_id "$LOG_FILES" "storage: extract log path" " -import json, sys -d = json.load(sys.stdin) -files = d.get('log_files') or d.get('files') or d.get('data', {}).get('log_files') or [] -if isinstance(files, list) and files: - p = files[0].get('path') or (files[0] if isinstance(files[0], str) else None) - if p: - print(p) -") - -if [[ -n "$LOG_PATH" ]]; then - run_test "storage: logs (path=$LOG_PATH)" \ - unraid_storage "{\"action\":\"logs\",\"log_path\":\"$LOG_PATH\",\"tail_lines\":20}" -else - skip_test "storage: logs" "no log files found to discover path" -fi - -# ── VM ID ───────────────────────────────────────────────────────────────────── -VM_ID=$(extract_id "$VM_LIST" "vm: extract VM ID" " -import json, sys -d = json.load(sys.stdin) -vms = d.get('vms') or d.get('data', {}).get('vms') or [] -if isinstance(vms, list) and vms: - vid = vms[0].get('uuid') or vms[0].get('id') or vms[0].get('name') - if vid: - print(vid) -") - -if [[ -n "$VM_ID" ]]; then - run_test "vm: details (id=$VM_ID)" \ - unraid_vm "{\"action\":\"details\",\"vm_id\":\"$VM_ID\"}" -else - skip_test "vm: details" "no VMs found to discover ID" -fi - -# ── API key ID ──────────────────────────────────────────────────────────────── -KEY_ID=$(extract_id "$KEYS_LIST" "keys: extract key ID" " -import json, sys -d = json.load(sys.stdin) -keys = d.get('keys') or d.get('apiKeys') or d.get('data', {}).get('keys') or [] -if isinstance(keys, list) and keys: - kid = keys[0].get('id') - if kid: - print(kid) -") - -if [[ -n "$KEY_ID" ]]; then - run_test "keys: get (id=$KEY_ID)" \ - unraid_keys "{\"action\":\"get\",\"key_id\":\"$KEY_ID\"}" -else - skip_test "keys: get" "no API keys found to discover ID" -fi - -# ═══════════════════════════════════════════════════════════════════════════════ -# SUMMARY -# ═══════════════════════════════════════════════════════════════════════════════ - -TOTAL=$((PASS + FAIL + SKIP)) -echo 
"" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" -echo -e "${BOLD}Results: ${GREEN}${PASS} passed${NC} ${RED}${FAIL} failed${NC} ${YELLOW}${SKIP} skipped${NC} (${TOTAL} total)" - -if [[ ${#FAILED_TESTS[@]} -gt 0 ]]; then - echo "" - echo -e "${RED}${BOLD}Failed tests:${NC}" - for t in "${FAILED_TESTS[@]}"; do - echo -e " ${RED}✗${NC} $t" - done -fi - -echo "" -[[ $FAIL -eq 0 ]] && exit 0 || exit 1 diff --git a/tests/mcporter/test-destructive.sh b/tests/mcporter/test-destructive.sh index af46f52..8bc04ee 100755 --- a/tests/mcporter/test-destructive.sh +++ b/tests/mcporter/test-destructive.sh @@ -149,8 +149,8 @@ test_notifications_delete() { # Create the notification local create_raw - create_raw="$(mcall unraid_notifications \ - '{"action":"create","title":"mcp-test-delete","subject":"MCP destructive test","description":"Safe to delete","importance":"INFO"}')" + create_raw="$(mcall unraid \ + '{"action":"notification","subaction":"create","title":"mcp-test-delete","subject":"MCP destructive test","description":"Safe to delete","importance":"INFO"}')" local create_ok create_ok="$(python3 -c "import json,sys; d=json.loads('''${create_raw}'''); print(d.get('success', False))" 2>/dev/null)" if [[ "${create_ok}" != "True" ]]; then @@ -161,7 +161,7 @@ test_notifications_delete() { # The create response ID doesn't match the stored filename — list and find by title. # Use the LAST match so a stale notification with the same title is bypassed. 
local list_raw nid - list_raw="$(mcall unraid_notifications '{"action":"list","notification_type":"UNREAD"}')" + list_raw="$(mcall unraid '{"action":"notification","subaction":"list","notification_type":"UNREAD"}')" nid="$(python3 -c " import json,sys d = json.loads('''${list_raw}''') @@ -177,8 +177,8 @@ print(matches[0] if matches else '') fi local del_raw - del_raw="$(mcall unraid_notifications \ - "{\"action\":\"delete\",\"notification_id\":\"${nid}\",\"notification_type\":\"UNREAD\",\"confirm\":true}")" + del_raw="$(mcall unraid \ + "{\"action\":\"notification\",\"subaction\":\"delete\",\"notification_id\":\"${nid}\",\"notification_type\":\"UNREAD\",\"confirm\":true}")" # success=true OR deleteNotification key present (raw GraphQL response) both indicate success local success success="$(python3 -c " @@ -190,7 +190,7 @@ print(ok) if [[ "${success}" != "True" ]]; then # Leak: notification created but not deleted — archive it so it doesn't clutter the feed - mcall unraid_notifications "{\"action\":\"archive\",\"notification_id\":\"${nid}\"}" &>/dev/null || true + mcall unraid "{\"action\":\"notification\",\"subaction\":\"archive\",\"notification_id\":\"${nid}\"}" &>/dev/null || true fail_test "${label}" "delete did not return success=true: ${del_raw} (notification archived as fallback cleanup)" return fi @@ -201,7 +201,7 @@ print(ok) if ${CONFIRM}; then test_notifications_delete else - dry_run "notifications: delete [create notification → mcall unraid_notifications delete]" + dry_run "notifications: delete [create notification → mcall unraid action=notification subaction=delete]" fi # --------------------------------------------------------------------------- @@ -227,7 +227,7 @@ test_keys_delete() { # Guard: abort if test key already exists (don't delete a real key) # Note: API key names cannot contain hyphens — use "mcp test key" local existing_keys - existing_keys="$(mcall unraid_keys '{"action":"list"}')" + existing_keys="$(mcall unraid 
'{"action":"key","subaction":"list"}')" if python3 -c " import json,sys d = json.loads('''${existing_keys}''') @@ -241,8 +241,8 @@ sys.exit(1 if any(k.get('name') == 'mcp test key' for k in keys) else 0) fi local create_raw - create_raw="$(mcall unraid_keys \ - '{"action":"create","name":"mcp test key","roles":["VIEWER"]}')" + create_raw="$(mcall unraid \ + '{"action":"key","subaction":"create","name":"mcp test key","roles":["VIEWER"]}')" local kid kid="$(python3 -c "import json,sys; d=json.loads('''${create_raw}'''); print(d.get('key',{}).get('id',''))" 2>/dev/null)" @@ -252,20 +252,20 @@ sys.exit(1 if any(k.get('name') == 'mcp test key' for k in keys) else 0) fi local del_raw - del_raw="$(mcall unraid_keys "{\"action\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}")" + del_raw="$(mcall unraid "{\"action\":\"key\",\"subaction\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}")" local success success="$(python3 -c "import json,sys; d=json.loads('''${del_raw}'''); print(d.get('success', False))" 2>/dev/null)" if [[ "${success}" != "True" ]]; then # Cleanup: attempt to delete the leaked key so future runs are not blocked - mcall unraid_keys "{\"action\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}" &>/dev/null || true + mcall unraid "{\"action\":\"key\",\"subaction\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}" &>/dev/null || true fail_test "${label}" "delete did not return success=true: ${del_raw} (key delete re-attempted as fallback cleanup)" return fi # Verify gone local list_raw - list_raw="$(mcall unraid_keys '{"action":"list"}')" + list_raw="$(mcall unraid '{"action":"key","subaction":"list"}')" if python3 -c " import json,sys d = json.loads('''${list_raw}''') @@ -281,7 +281,7 @@ sys.exit(0 if not any(k.get('id') == '${kid}' for k in keys) else 1) if ${CONFIRM}; then test_keys_delete else - dry_run "keys: delete [create test key → mcall unraid_keys delete]" + dry_run "keys: delete [create test key → mcall unraid action=key 
subaction=delete]" fi # --------------------------------------------------------------------------- diff --git a/tests/mcporter/test-tools.sh b/tests/mcporter/test-tools.sh index 71ca9a0..e7e9fdc 100755 --- a/tests/mcporter/test-tools.sh +++ b/tests/mcporter/test-tools.sh @@ -2,12 +2,12 @@ # ============================================================================= # test-tools.sh — Integration smoke-test for unraid-mcp MCP server tools # -# Exercises every non-destructive action across all 10 tools using mcporter. -# The server is launched ad-hoc via mcporter's --stdio flag so no persistent -# process or registered server entry is required. +# Exercises broad non-destructive smoke coverage of the consolidated `unraid` tool +# (action + subaction pattern). The server is launched ad-hoc via mcporter's +# --stdio flag so no persistent process or registered server entry is required. # # Usage: -# ./scripts/test-tools.sh [--timeout-ms N] [--parallel] [--verbose] +# ./tests/mcporter/test-tools.sh [--timeout-ms N] [--parallel] [--verbose] # # Options: # --timeout-ms N Per-call timeout in milliseconds (default: 25000) @@ -134,6 +134,11 @@ check_prerequisites() { missing=true fi + if ! command -v jq &>/dev/null; then + log_error "jq not found in PATH. Install it and re-run." + missing=true + fi + if [[ ! -f "${PROJECT_DIR}/pyproject.toml" ]]; then log_error "pyproject.toml not found at ${PROJECT_DIR}. Wrong directory?" missing=true @@ -146,9 +151,8 @@ check_prerequisites() { # --------------------------------------------------------------------------- # Server startup smoke-test -# Launches the stdio server and calls unraid_health action=check. -# Returns 0 if the server responds (even with an API error — that still -# means the Python process started cleanly), non-zero on import failure. +# Launches the stdio server and calls unraid action=health subaction=check. +# Returns 0 if the server responds, non-zero on import failure. 
# --------------------------------------------------------------------------- smoke_test_server() { log_info "Smoke-testing server startup..." @@ -159,14 +163,13 @@ smoke_test_server() { --stdio "uv run unraid-mcp-server" \ --cwd "${PROJECT_DIR}" \ --name "unraid-smoke" \ - --tool unraid_health \ - --args '{"action":"check"}' \ + --tool unraid \ + --args '{"action":"health","subaction":"check"}' \ --timeout 30000 \ --output json \ 2>&1 )" || true - # If mcporter returns the offline error the server failed to import/start if printf '%s' "${output}" | grep -q '"kind": "offline"'; then log_error "Server failed to start. Output:" printf '%s\n' "${output}" >&2 @@ -177,18 +180,18 @@ smoke_test_server() { return 2 fi - # Assert the response contains a valid tool response field, not a bare JSON error. - # unraid_health action=check always returns {"status": ...} on success. local key_check key_check="$( printf '%s' "${output}" | python3 -c " import sys, json try: d = json.load(sys.stdin) - if 'status' in d or 'success' in d or 'error' in d: + if 'error' in d: + print('error: tool returned error key — ' + str(d.get('error', ''))) + elif 'status' in d or 'success' in d: print('ok') else: - print('missing: no status/success/error key in response') + print('missing: no status/success key in response') except Exception as e: print('parse_error: ' + str(e)) " 2>/dev/null @@ -206,46 +209,38 @@ except Exception as e: # --------------------------------------------------------------------------- # mcporter call wrapper -# Usage: mcporter_call -# Writes the mcporter JSON output to stdout. -# Returns the mcporter exit code. +# Usage: mcporter_call +# All calls go to the single `unraid` tool. 
# --------------------------------------------------------------------------- mcporter_call() { - local tool_name="${1:?tool_name required}" - local args_json="${2:?args_json required}" + local args_json="${1:?args_json required}" + # Redirect stderr to the log file so startup warnings/logs don't pollute the JSON stdout. mcporter call \ --stdio "uv run unraid-mcp-server" \ --cwd "${PROJECT_DIR}" \ --name "unraid" \ - --tool "${tool_name}" \ + --tool unraid \ --args "${args_json}" \ --timeout "${CALL_TIMEOUT_MS}" \ --output json \ - 2>&1 + 2>>"${LOG_FILE}" } # --------------------------------------------------------------------------- # Test runner -# Usage: run_test