mirror of
https://github.com/jmagar/unraid-mcp.git
synced 2026-03-23 12:39:24 -07:00
Compare commits
29 Commits
main
...
feat/googl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e68d4a80e4 | ||
|
|
dc1e5f18d8 | ||
|
|
2b777be927 | ||
|
|
d59f8c22a8 | ||
|
|
cc24f1ec62 | ||
|
|
6f7a58a0f9 | ||
|
|
440245108a | ||
|
|
9754261402 | ||
|
|
9e9915b2fa | ||
|
|
2ab61be2df | ||
|
|
b319cf4932 | ||
|
|
0f46cb9713 | ||
|
|
1248ccd53e | ||
|
|
4a1ffcfd51 | ||
|
|
f69aa94826 | ||
|
|
5187cf730f | ||
|
|
896fc8db1b | ||
|
|
7db878b80b | ||
|
|
3888b9cb4a | ||
|
|
cf9449a15d | ||
|
|
884319ab11 | ||
|
|
efaab031ae | ||
|
|
dab1cd6995 | ||
|
|
faf9fb9ad7 | ||
|
|
fe7b6485fd | ||
|
|
d7545869e2 | ||
|
|
cdab970c12 | ||
|
|
80d2dd39ee | ||
|
|
aa5fa3e177 |
@@ -31,32 +31,34 @@ This directory contains the Claude Code marketplace configuration for the Unraid
|
||||
Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring.
|
||||
|
||||
**Features:**
|
||||
- 11 tools with ~104 actions (queries and mutations)
|
||||
- Real-time system metrics
|
||||
- 1 consolidated `unraid` tool with ~108 actions across 15 domains
|
||||
- Real-time live subscriptions (CPU, memory, logs, array state, UPS)
|
||||
- Disk health and temperature monitoring
|
||||
- Docker container management
|
||||
- VM status and control
|
||||
- Log file access
|
||||
- Network share information
|
||||
- Notification management
|
||||
- Plugin, rclone, API key, and OIDC management
|
||||
|
||||
**Version:** 0.2.0
|
||||
**Version:** 1.0.0
|
||||
**Category:** Infrastructure
|
||||
**Tags:** unraid, monitoring, homelab, graphql, docker, virtualization
|
||||
|
||||
## Configuration
|
||||
|
||||
After installation, configure your Unraid server credentials:
|
||||
After installation, run setup to configure credentials interactively:
|
||||
|
||||
```bash
|
||||
export UNRAID_API_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key"
|
||||
```python
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
Credentials are stored at `~/.unraid-mcp/.env` automatically.
|
||||
|
||||
**Getting an API Key:**
|
||||
1. Open Unraid WebUI
|
||||
2. Go to Settings → Management Access → API Keys
|
||||
3. Click "Create" and select "Viewer" role
|
||||
3. Click "Create" and select "Viewer" role (or appropriate roles for mutations)
|
||||
4. Copy the generated API key
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
"email": "jmagar@users.noreply.github.com"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Comprehensive Unraid server management and monitoring tools via GraphQL API",
|
||||
"version": "0.2.0",
|
||||
"description": "Comprehensive Unraid server management and monitoring via a single consolidated MCP tool (~108 actions across 15 domains)",
|
||||
"version": "1.0.0",
|
||||
"homepage": "https://github.com/jmagar/unraid-mcp",
|
||||
"repository": "https://github.com/jmagar/unraid-mcp"
|
||||
},
|
||||
@@ -14,8 +14,8 @@
|
||||
{
|
||||
"name": "unraid",
|
||||
"source": "./",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring",
|
||||
"version": "0.2.0",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API — single `unraid` tool with action+subaction routing for array, disk, docker, VM, notifications, live metrics, and more",
|
||||
"version": "1.0.0",
|
||||
"tags": ["unraid", "monitoring", "homelab", "graphql", "docker", "virtualization"],
|
||||
"category": "infrastructure"
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "unraid",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring",
|
||||
"version": "0.6.0",
|
||||
"version": "1.1.2",
|
||||
"author": {
|
||||
"name": "jmagar",
|
||||
"email": "jmagar@users.noreply.github.com"
|
||||
|
||||
44
.env.example
44
.env.example
@@ -35,3 +35,47 @@ UNRAID_MAX_RECONNECT_ATTEMPTS=10
|
||||
# Optional: Custom log file path for subscription auto-start diagnostics
|
||||
# Defaults to standard log if not specified
|
||||
# UNRAID_AUTOSTART_LOG_PATH=/custom/path/to/autostart.log
|
||||
|
||||
# Credentials Directory Override (Optional)
|
||||
# -----------------------------------------
|
||||
# Override the credentials directory (default: ~/.unraid-mcp/)
|
||||
# UNRAID_CREDENTIALS_DIR=/custom/path/to/credentials
|
||||
|
||||
# Google OAuth Protection (Optional)
|
||||
# -----------------------------------
|
||||
# Protects the MCP HTTP server — clients must authenticate with Google before calling tools.
|
||||
# Requires streamable-http or sse transport (not stdio).
|
||||
#
|
||||
# Setup:
|
||||
# 1. Google Cloud Console → APIs & Services → Credentials
|
||||
# 2. Create OAuth 2.0 Client ID (Web application)
|
||||
# 3. Authorized redirect URIs: <UNRAID_MCP_BASE_URL>/auth/callback
|
||||
# 4. Copy Client ID and Client Secret below
|
||||
#
|
||||
# UNRAID_MCP_BASE_URL: Public URL clients use to reach THIS server (for redirect URIs).
|
||||
# Examples:
|
||||
# http://10.1.0.2:6970 (LAN)
|
||||
# http://100.x.x.x:6970 (Tailscale)
|
||||
# https://mcp.yourdomain.com (reverse proxy)
|
||||
#
|
||||
# UNRAID_MCP_JWT_SIGNING_KEY: Stable secret for signing FastMCP JWT tokens.
|
||||
# Generate once: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
# NEVER change after first use — all client sessions will be invalidated.
|
||||
#
|
||||
# Leave GOOGLE_CLIENT_ID empty to disable OAuth (server runs unprotected).
|
||||
# GOOGLE_CLIENT_ID=
|
||||
# GOOGLE_CLIENT_SECRET=
|
||||
# UNRAID_MCP_BASE_URL=http://10.1.0.2:6970
|
||||
# UNRAID_MCP_JWT_SIGNING_KEY=<generate with command above>
|
||||
|
||||
# API Key Authentication (Optional)
|
||||
# -----------------------------------
|
||||
# Alternative to Google OAuth — clients present this key as a bearer token:
|
||||
# Authorization: Bearer <UNRAID_MCP_API_KEY>
|
||||
#
|
||||
# Can be the same value as UNRAID_API_KEY (reuse your Unraid key), or a
|
||||
# separate dedicated secret. Set both GOOGLE_CLIENT_ID and UNRAID_MCP_API_KEY
|
||||
# to accept either auth method (MultiAuth).
|
||||
#
|
||||
# Leave empty to disable API key auth.
|
||||
# UNRAID_MCP_API_KEY=
|
||||
79
.github/workflows/ci.yml
vendored
Normal file
79
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main", "feat/**", "fix/**"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: Lint & Format
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "0.9.25"
|
||||
- name: Install dependencies
|
||||
run: uv sync --group dev
|
||||
- name: Ruff check
|
||||
run: uv run ruff check unraid_mcp/ tests/
|
||||
- name: Ruff format
|
||||
run: uv run ruff format --check unraid_mcp/ tests/
|
||||
|
||||
typecheck:
|
||||
name: Type Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "0.9.25"
|
||||
- name: Install dependencies
|
||||
run: uv sync --group dev
|
||||
- name: ty check
|
||||
run: uv run ty check unraid_mcp/
|
||||
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "0.9.25"
|
||||
- name: Install dependencies
|
||||
run: uv sync --group dev
|
||||
- name: Run tests (excluding integration/slow)
|
||||
run: uv run pytest -m "not slow and not integration" --tb=short -q
|
||||
- name: Check coverage
|
||||
run: uv run pytest -m "not slow and not integration" --cov=unraid_mcp --cov-report=term-missing --tb=short -q
|
||||
|
||||
version-sync:
|
||||
name: Version Sync Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Check pyproject.toml and plugin.json versions match
|
||||
run: |
|
||||
TOML_VER=$(grep '^version = ' pyproject.toml | sed 's/version = "//;s/"//')
|
||||
PLUGIN_VER=$(python3 -c "import json; print(json.load(open('.claude-plugin/plugin.json'))['version'])")
|
||||
echo "pyproject.toml: $TOML_VER"
|
||||
echo "plugin.json: $PLUGIN_VER"
|
||||
if [ "$TOML_VER" != "$PLUGIN_VER" ]; then
|
||||
echo "ERROR: Version mismatch! Update .claude-plugin/plugin.json to match pyproject.toml"
|
||||
exit 1
|
||||
fi
|
||||
echo "Versions in sync: $TOML_VER"
|
||||
|
||||
audit:
|
||||
name: Security Audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "0.9.25"
|
||||
- name: Dependency audit
|
||||
run: uv audit
|
||||
135
CHANGELOG.md
Normal file
135
CHANGELOG.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project are documented here.
|
||||
|
||||
## [1.1.2] - 2026-03-23
|
||||
|
||||
### Security
|
||||
- **Path traversal**: Removed `/mnt/` from `_ALLOWED_LOG_PREFIXES` — was exposing all Unraid user shares to path-based reads
|
||||
- **Path traversal**: Added early `..` detection for `disk/logs` and `live/log_tail` before any filesystem access; added `/boot/` prefix restriction for `flash_backup` source paths
|
||||
- **Timing-safe auth**: `verify_token` now uses `hmac.compare_digest` instead of `==` to prevent timing oracle attacks on API key comparison
|
||||
- **Traceback leak**: `include_traceback` in `ErrorHandlingMiddleware` is now gated on `DEBUG` log level; production deployments no longer expose stack traces
|
||||
|
||||
### Fixed
|
||||
- **Health check**: `_comprehensive_health_check` now re-raises `CredentialsNotConfiguredError` instead of swallowing it into a generic unhealthy status
|
||||
- **UPS device query**: Removed non-existent `nominalPower` and `currentPower` fields from `ups_device` query — every call was failing against the live API
|
||||
- **Stale credential bindings**: Subscription modules (`manager.py`, `snapshot.py`, `utils.py`, `diagnostics.py`) previously captured `UNRAID_API_KEY`/`UNRAID_API_URL` at import time; replaced with `_settings.ATTR` call-time access so `apply_runtime_config()` updates propagate correctly after credential elicitation
|
||||
|
||||
### Added
|
||||
- **CI pipeline**: `.github/workflows/ci.yml` with 5 jobs — lint (`ruff`), typecheck (`ty`), test (`pytest -m "not integration"`), version-sync check, and `uv audit` dependency scan
|
||||
- **Coverage threshold**: `fail_under = 80` added to `[tool.coverage.report]`
|
||||
- **Version sync check**: `scripts/validate-marketplace.sh` now verifies `pyproject.toml` and `plugin.json` versions match
|
||||
|
||||
### Changed
|
||||
- **Docs**: Updated `CLAUDE.md`, `README.md` to reflect 3 tools (1 primary + 2 diagnostic); corrected system domain count (19→18); fixed scripts comment
|
||||
- **Docs**: `docs/AUTHENTICATION.md` H1 retitled to "Authentication Setup Guide"
|
||||
- **Docs**: Added `UNRAID_CREDENTIALS_DIR` commented entry to `.env.example`
|
||||
- Removed `from __future__ import annotations` from `snapshot.py` (caused TC002 false positives with FastMCP)
|
||||
- Added `# noqa: ASYNC109` to `timeout` parameters in `_handle_live` and `unraid()` (valid suppressions)
|
||||
- Fixed `start_array*` → `start_array` in tool docstring table (`start_array` is not in `_ARRAY_DESTRUCTIVE`)
|
||||
|
||||
---
|
||||
|
||||
## [1.1.1] - 2026-03-16
|
||||
|
||||
### Added
|
||||
- **API key auth**: `Authorization: Bearer <UNRAID_MCP_API_KEY>` bearer token authentication via `ApiKeyVerifier` — machine-to-machine access without OAuth browser flow
|
||||
- **MultiAuth**: When both Google OAuth and API key are configured, `MultiAuth` accepts either method
|
||||
- **Google OAuth**: Full `GoogleProvider` integration — browser-based OAuth 2.0 flow with JWT session tokens; `UNRAID_MCP_JWT_SIGNING_KEY` for stable tokens across restarts
|
||||
- **`fastmcp.json`**: Dev tooling configs for FastMCP
|
||||
|
||||
### Fixed
|
||||
- Auth test isolation: use `os.environ[k] = ""` instead of `delenv` to prevent dotenv re-injection between test reloads
|
||||
|
||||
---
|
||||
|
||||
## [1.1.0] - 2026-03-16
|
||||
|
||||
### Breaking Changes
|
||||
- **Tool consolidation**: 15 individual domain tools (`unraid_docker`, `unraid_vm`, etc.) merged into single `unraid` tool with `action` + `subaction` routing
|
||||
- Old: `unraid_docker(action="list")`
|
||||
- New: `unraid(action="docker", subaction="list")`
|
||||
|
||||
### Added
|
||||
- **`live` tool** (11 subactions): Real-time WebSocket subscription snapshots — `cpu`, `memory`, `cpu_telemetry`, `array_state`, `parity_progress`, `ups_status`, `notifications_overview`, `notification_feed`, `log_tail`, `owner`, `server_status`
|
||||
- **`customization` tool** (5 subactions): `theme`, `public_theme`, `is_initial_setup`, `sso_enabled`, `set_theme`
|
||||
- **`plugin` tool** (3 subactions): `list`, `add`, `remove`
|
||||
- **`oidc` tool** (5 subactions): `providers`, `provider`, `configuration`, `public_providers`, `validate_session`
|
||||
- **Persistent `SubscriptionManager`**: `unraid://live/*` MCP resources backed by long-lived WebSocket connections with auto-start and reconnection
|
||||
- **`diagnose_subscriptions`** and **`test_subscription_query`** diagnostic tools
|
||||
- `array`: Added `parity_history`, `start_array`, `stop_array`, `add_disk`, `remove_disk`, `mount_disk`, `unmount_disk`, `clear_disk_stats`
|
||||
- `keys`: Added `add_role`, `remove_role`
|
||||
- `settings`: Added `update_ssh` (confirm required)
|
||||
- `stop_array` added to `_ARRAY_DESTRUCTIVE`
|
||||
- `gate_destructive_action` helper in `core/guards.py` — centralized elicitation + confirm guard
|
||||
- Full safety test suite: `TestNoGraphQLCallsWhenUnconfirmed` (zero-I/O guarantee for all 13 destructive actions)
|
||||
|
||||
### Fixed
|
||||
- Removed 29 actions confirmed absent from live API v4.29.2 via GraphQL introspection (Docker organizer mutations, `unassignedDevices`, `warningsAndAlerts`, etc.)
|
||||
- `log_tail` path validated against allowlist before subscription start
|
||||
- WebSocket auth uses `x-api-key` connectionParams format
|
||||
|
||||
---
|
||||
|
||||
## [1.0.0] - 2026-03-14 through 2026-03-15
|
||||
|
||||
### Breaking Changes
|
||||
- Credential storage moved to `~/.unraid-mcp/.env` (dir 700, file 600); all runtimes load from this path
|
||||
- `unraid_health(action="setup")` is the only tool that triggers credential elicitation; all others propagate `CredentialsNotConfiguredError`
|
||||
|
||||
### Added
|
||||
- `CredentialsNotConfiguredError` sentinel — propagates cleanly through `tool_error_handler` with exact credential path in the error message
|
||||
- `is_configured()` and `apply_runtime_config()` in `settings.py` for runtime credential injection
|
||||
- `elicit_and_configure()` with `.env` persistence and confirmation before overwrite
|
||||
- 28 GraphQL mutations across storage, docker, notifications, and new settings tool
|
||||
- Comprehensive test suite expansion: schema validation (99 tests), HTTP layer (respx), property tests, safety audit, contract tests
|
||||
|
||||
### Fixed
|
||||
- Numerous PR review fixes across 50+ commits (CodeRabbit, ChatGPT-Codex review rounds)
|
||||
- Shell scripts hardened against injection and null guards
|
||||
- Notification enum validation, subscription lock split, safe_get semantics
|
||||
|
||||
---
|
||||
|
||||
## [0.6.0] - 2026-03-15
|
||||
|
||||
### Added
|
||||
- Subscription byte/line cap to prevent unbounded memory growth
|
||||
- `asyncio.timeout` bounds on `subscribe_once` / `subscribe_collect`
|
||||
- Partial auto-start for subscriptions (best-effort on startup)
|
||||
|
||||
### Fixed
|
||||
- WebSocket URL scheme handling (`ws://`/`wss://`)
|
||||
- `flash_backup` path validation and smoke test assertions
|
||||
|
||||
---
|
||||
|
||||
## [0.5.0] - 2026-03-15
|
||||
|
||||
*Tool expansion and live subscription foundation.*
|
||||
|
||||
---
|
||||
|
||||
## [0.4.x] - 2026-03-13 through 2026-03-14
|
||||
|
||||
*Credential elicitation system, per-tool refactors, and mutation additions.*
|
||||
|
||||
---
|
||||
|
||||
## [0.2.x] - 2026-02-15 through 2026-03-13
|
||||
|
||||
*Initial public release hardening: PR review cycles, test suite expansion, security fixes, plugin manifest.*
|
||||
|
||||
---
|
||||
|
||||
## [0.1.0] - 2026-02-08
|
||||
|
||||
### Added
|
||||
- Consolidated 26 tools into 10 tools with 90 actions
|
||||
- FastMCP architecture migration with `uv` toolchain
|
||||
- Docker Compose support with health checks
|
||||
- WebSocket subscription infrastructure
|
||||
|
||||
---
|
||||
|
||||
*Format: [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). Versioning: [Semantic Versioning](https://semver.org/).*
|
||||
122
CLAUDE.md
122
CLAUDE.md
@@ -54,12 +54,58 @@ docker compose down
|
||||
```
|
||||
|
||||
### Environment Setup
|
||||
- Copy `.env.example` to `.env` and configure:
|
||||
- `UNRAID_API_URL`: Unraid GraphQL endpoint (required)
|
||||
- `UNRAID_API_KEY`: Unraid API key (required)
|
||||
- `UNRAID_MCP_TRANSPORT`: Transport type (default: streamable-http)
|
||||
- `UNRAID_MCP_PORT`: Server port (default: 6970)
|
||||
- `UNRAID_MCP_HOST`: Server host (default: 0.0.0.0)
|
||||
Copy `.env.example` to `.env` and configure:
|
||||
|
||||
**Required:**
|
||||
- `UNRAID_API_URL`: Unraid GraphQL endpoint
|
||||
- `UNRAID_API_KEY`: Unraid API key
|
||||
|
||||
**Server:**
|
||||
- `UNRAID_MCP_TRANSPORT`: Transport type (default: streamable-http)
|
||||
- `UNRAID_MCP_PORT`: Server port (default: 6970)
|
||||
- `UNRAID_MCP_HOST`: Server host (default: 0.0.0.0)
|
||||
- `UNRAID_MCP_LOG_LEVEL`: Log verbosity (default: INFO)
|
||||
- `UNRAID_MCP_LOG_FILE`: Log filename in logs/ (default: unraid-mcp.log)
|
||||
|
||||
**SSL/TLS:**
|
||||
- `UNRAID_VERIFY_SSL`: SSL verification (default: true; set `false` for self-signed certs)
|
||||
|
||||
**Subscriptions:**
|
||||
- `UNRAID_AUTO_START_SUBSCRIPTIONS`: Auto-start live subscriptions on startup (default: true)
|
||||
- `UNRAID_MAX_RECONNECT_ATTEMPTS`: WebSocket reconnect limit (default: 10)
|
||||
|
||||
**Credentials override:**
|
||||
- `UNRAID_CREDENTIALS_DIR`: Override the `~/.unraid-mcp/` credentials directory path
|
||||
|
||||
### Authentication (Optional — protects the HTTP server)
|
||||
|
||||
Two independent methods. Use either or both — when both are set, `MultiAuth` accepts either.
|
||||
|
||||
**Google OAuth** — requires all three vars:
|
||||
|
||||
| Env Var | Purpose |
|
||||
|---------|---------|
|
||||
| `GOOGLE_CLIENT_ID` | Google OAuth 2.0 Client ID |
|
||||
| `GOOGLE_CLIENT_SECRET` | Google OAuth 2.0 Client Secret |
|
||||
| `UNRAID_MCP_BASE_URL` | Public URL of this server (e.g. `http://10.1.0.2:6970`) |
|
||||
| `UNRAID_MCP_JWT_SIGNING_KEY` | Stable 32+ char secret — prevents token invalidation on restart |
|
||||
|
||||
Google Cloud Console setup: APIs & Services → Credentials → OAuth 2.0 Client ID (Web application) → Authorized redirect URIs: `<UNRAID_MCP_BASE_URL>/auth/callback`
|
||||
|
||||
**API Key** — clients present as `Authorization: Bearer <key>`:
|
||||
|
||||
| Env Var | Purpose |
|
||||
|---------|---------|
|
||||
| `UNRAID_MCP_API_KEY` | Static bearer token (can be same value as `UNRAID_API_KEY`) |
|
||||
|
||||
**Generate a stable JWT signing key:**
|
||||
```bash
|
||||
python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
```
|
||||
|
||||
**Omit all auth vars to run without auth** (default — open server).
|
||||
|
||||
**Full guide:** [`docs/AUTHENTICATION.md`](docs/AUTHENTICATION.md)
|
||||
|
||||
## Architecture
|
||||
|
||||
@@ -83,31 +129,46 @@ docker compose down
|
||||
- **Data Processing**: Tools return both human-readable summaries and detailed raw data
|
||||
- **Health Monitoring**: Comprehensive health check tool for system monitoring
|
||||
- **Real-time Subscriptions**: WebSocket-based live data streaming
|
||||
- **Persistent Subscription Manager**: `live` action subactions use a shared `SubscriptionManager`
|
||||
that maintains persistent WebSocket connections. Resources serve cached data via
|
||||
`subscription_manager.get_resource_data(action)`. A "connecting" placeholder is returned
|
||||
while the subscription starts — callers should retry in a moment. When
|
||||
`UNRAID_AUTO_START_SUBSCRIPTIONS=false`, resources fall back to on-demand `subscribe_once`.
|
||||
|
||||
### Tool Categories (15 Tools, ~103 Actions)
|
||||
1. **`unraid_info`** (18 actions): overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config
|
||||
2. **`unraid_array`** (13 actions): parity_start, parity_pause, parity_resume, parity_cancel, parity_status, parity_history, start_array, stop_array, add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats
|
||||
3. **`unraid_storage`** (6 actions): shares, disks, disk_details, log_files, logs, flash_backup
|
||||
4. **`unraid_docker`** (7 actions): list, details, start, stop, restart, networks, network_details
|
||||
5. **`unraid_vm`** (9 actions): list, details, start, stop, pause, resume, force_stop, reboot, reset
|
||||
6. **`unraid_notifications`** (12 actions): overview, list, create, archive, unread, delete, delete_archived, archive_all, archive_many, unarchive_many, unarchive_all, recalculate
|
||||
7. **`unraid_rclone`** (4 actions): list_remotes, config_form, create_remote, delete_remote
|
||||
8. **`unraid_users`** (1 action): me
|
||||
9. **`unraid_keys`** (7 actions): list, get, create, update, delete, add_role, remove_role
|
||||
10. **`unraid_health`** (4 actions): check, test_connection, diagnose, setup
|
||||
11. **`unraid_settings`** (2 actions): update, configure_ups
|
||||
12. **`unraid_customization`** (5 actions): theme, public_theme, is_initial_setup, sso_enabled, set_theme
|
||||
13. **`unraid_plugins`** (3 actions): list, add, remove
|
||||
14. **`unraid_oidc`** (5 actions): providers, provider, configuration, public_providers, validate_session
|
||||
15. **`unraid_live`** (11 actions): cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, notification_feed, log_tail, owner, server_status
|
||||
### Tool Categories (3 Tools: 1 Primary + 2 Diagnostic)
|
||||
|
||||
The server registers **3 MCP tools**:
|
||||
- **`unraid`** — primary tool with `action` (domain) + `subaction` (operation) routing, 107 subactions. Call it as `unraid(action="docker", subaction="list")`.
|
||||
- **`diagnose_subscriptions`** — inspect subscription connection states, errors, and WebSocket URLs.
|
||||
- **`test_subscription_query`** — test a specific GraphQL subscription query (allowlisted fields only).
|
||||
|
||||
| action | subactions |
|
||||
|--------|-----------|
|
||||
| **system** (18) | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config |
|
||||
| **health** (4) | check, test_connection, diagnose, setup |
|
||||
| **array** (13) | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array*, add_disk, remove_disk*, mount_disk, unmount_disk, clear_disk_stats* |
|
||||
| **disk** (6) | shares, disks, disk_details, log_files, logs, flash_backup* |
|
||||
| **docker** (7) | list, details, start, stop, restart, networks, network_details |
|
||||
| **vm** (9) | list, details, start, stop, pause, resume, force_stop*, reboot, reset* |
|
||||
| **notification** (12) | overview, list, create, archive, mark_unread, recalculate, archive_all, archive_many, unarchive_many, unarchive_all, delete*, delete_archived* |
|
||||
| **key** (7) | list, get, create, update, delete*, add_role, remove_role |
|
||||
| **plugin** (3) | list, add, remove* |
|
||||
| **rclone** (4) | list_remotes, config_form, create_remote, delete_remote* |
|
||||
| **setting** (2) | update, configure_ups* |
|
||||
| **customization** (5) | theme, public_theme, is_initial_setup, sso_enabled, set_theme |
|
||||
| **oidc** (5) | providers, provider, configuration, public_providers, validate_session |
|
||||
| **user** (1) | me |
|
||||
| **live** (11) | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, notification_feed, log_tail, owner, server_status |
|
||||
|
||||
`*` = destructive, requires `confirm=True`
|
||||
|
||||
### Destructive Actions (require `confirm=True`)
|
||||
- **array**: remove_disk, clear_disk_stats
|
||||
- **array**: stop_array, remove_disk, clear_disk_stats
|
||||
- **vm**: force_stop, reset
|
||||
- **notifications**: delete, delete_archived
|
||||
- **rclone**: delete_remote
|
||||
- **keys**: delete
|
||||
- **storage**: flash_backup
|
||||
- **disk**: flash_backup
|
||||
- **settings**: configure_ups
|
||||
- **plugins**: remove
|
||||
|
||||
@@ -140,10 +201,10 @@ The server loads environment variables from multiple locations in order:
|
||||
## Critical Gotchas
|
||||
|
||||
### Mutation Handler Ordering
|
||||
**Mutation handlers MUST return before the `QUERIES[action]` lookup.** Mutations are not in the `QUERIES` dict — reaching that line for a mutation action causes a `KeyError`. Always add early-return `if action == "mutation_name": ... return` blocks BEFORE the `QUERIES` lookup.
|
||||
**Mutation handlers MUST return before the domain query dict lookup.** Mutations are not in the domain `_*_QUERIES` dicts (e.g., `_DOCKER_QUERIES`, `_ARRAY_QUERIES`) — reaching that line for a mutation subaction causes a `KeyError`. Always add early-return `if subaction == "mutation_name": ... return` blocks BEFORE the queries lookup.
|
||||
|
||||
### Test Patching
|
||||
- Patch at the **tool module level**: `unraid_mcp.tools.info.make_graphql_request` (not core)
|
||||
- Patch at the **tool module level**: `unraid_mcp.tools.unraid.make_graphql_request` (not core)
|
||||
- `conftest.py`'s `mock_graphql_request` patches the core module — wrong for tool-level tests
|
||||
- Use `conftest.py`'s `make_tool_fn()` helper or local `_make_tool()` pattern
|
||||
|
||||
@@ -169,11 +230,14 @@ uv run pytest -x # Fail fast on first error
|
||||
|
||||
### Scripts
|
||||
```bash
|
||||
# HTTP smoke-test against a live server (11 tools, all non-destructive actions)
|
||||
# HTTP smoke-test against a live server (non-destructive actions, all domains)
|
||||
./tests/mcporter/test-actions.sh [MCP_URL] # default: http://localhost:6970/mcp
|
||||
|
||||
# stdio smoke-test, no running server needed (good for CI)
|
||||
./tests/mcporter/test-tools.sh [--parallel] [--timeout-ms N] [--verbose]
|
||||
|
||||
# Destructive action smoke-test (confirms guard blocks without confirm=True)
|
||||
./tests/mcporter/test-destructive.sh [MCP_URL]
|
||||
```
|
||||
See `tests/mcporter/README.md` for transport differences and `docs/DESTRUCTIVE_ACTIONS.md` for exact destructive-action test commands.
|
||||
|
||||
@@ -190,7 +254,9 @@ When bumping the version, **always update both files** — they must stay in syn
|
||||
|
||||
### Credential Storage (`~/.unraid-mcp/.env`)
|
||||
All runtimes (plugin, direct, Docker) load credentials from `~/.unraid-mcp/.env`.
|
||||
- **Plugin/direct:** `unraid_health action=setup` writes this file automatically via elicitation,
|
||||
- **Plugin/direct:** `unraid action=health subaction=setup` writes this file automatically via elicitation,
|
||||
**Safe to re-run**: always prompts for confirmation before overwriting existing credentials,
|
||||
whether the connection is working or not (failed probe may be a transient outage, not bad creds).
|
||||
or manual: `mkdir -p ~/.unraid-mcp && cp .env.example ~/.unraid-mcp/.env` then edit.
|
||||
- **Docker:** `docker-compose.yml` loads it via `env_file` before container start.
|
||||
- **No symlinks needed.** Version bumps do not affect this path.
|
||||
|
||||
245
README.md
245
README.md
@@ -1,20 +1,20 @@
|
||||
# 🚀 Unraid MCP Server
|
||||
|
||||
[](https://www.python.org/downloads/)
|
||||
[](https://github.com/jlowin/fastmcp)
|
||||
[](https://github.com/jlowin/fastmcp)
|
||||
[](LICENSE)
|
||||
|
||||
**A powerful MCP (Model Context Protocol) server that provides comprehensive tools to interact with an Unraid server's GraphQL API.**
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- 🔧 **11 Tools, ~104 Actions**: Complete Unraid management through MCP protocol
|
||||
- 🔧 **1 primary tool + 2 diagnostic tools, 107 subactions**: Complete Unraid management through a consolidated MCP tool
|
||||
- 🏗️ **Modular Architecture**: Clean, maintainable, and extensible codebase
|
||||
- ⚡ **High Performance**: Async/concurrent operations with optimized timeouts
|
||||
- 🔄 **Real-time Data**: WebSocket subscriptions for live log streaming
|
||||
- 🔄 **Real-time Data**: WebSocket subscriptions for live metrics, logs, array state, and more
|
||||
- 📊 **Health Monitoring**: Comprehensive system diagnostics and status
|
||||
- 🐳 **Docker Ready**: Full containerization support with Docker Compose
|
||||
- 🔒 **Secure**: Proper SSL/TLS configuration and API key management
|
||||
- 🔒 **Secure**: Optional Google OAuth 2.0 authentication + SSL/TLS + API key management
|
||||
- 📝 **Rich Logging**: Structured logging with rotation and multiple levels
|
||||
|
||||
---
|
||||
@@ -25,8 +25,8 @@
|
||||
- [Quick Start](#-quick-start)
|
||||
- [Installation](#-installation)
|
||||
- [Configuration](#-configuration)
|
||||
- [Google OAuth](#-google-oauth-optional)
|
||||
- [Available Tools & Resources](#-available-tools--resources)
|
||||
- [Custom Slash Commands](#-custom-slash-commands)
|
||||
- [Development](#-development)
|
||||
- [Architecture](#-architecture)
|
||||
- [Troubleshooting](#-troubleshooting)
|
||||
@@ -46,8 +46,7 @@
|
||||
```
|
||||
|
||||
This provides instant access to Unraid monitoring and management through Claude Code with:
|
||||
- **11 MCP tools** exposing **~104 actions** via the consolidated action pattern
|
||||
- **10 slash commands** for quick CLI-style access (`commands/`)
|
||||
- **1 primary MCP tool** (`unraid`) exposing **107 subactions** via `action` + `subaction` routing, plus `diagnose_subscriptions` and `test_subscription_query` diagnostic tools
|
||||
- Real-time system metrics and health monitoring
|
||||
- Docker container and VM lifecycle management
|
||||
- Disk health monitoring and storage management
|
||||
@@ -61,7 +60,7 @@ Claude Code plugin, direct `uv run` invocations, and Docker.
|
||||
|
||||
**Option 1 — Interactive (Claude Code plugin, elicitation-supported clients):**
|
||||
```
|
||||
unraid_health action=setup
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
The server prompts for your API URL and key, writes `~/.unraid-mcp/.env` automatically
|
||||
(created with mode 700/600), and activates credentials without restart.
|
||||
@@ -97,8 +96,13 @@ cd unraid-mcp
|
||||
|
||||
### 2. Configure Environment
|
||||
```bash
|
||||
# For Docker/production use — canonical credential location (all runtimes)
|
||||
mkdir -p ~/.unraid-mcp && chmod 700 ~/.unraid-mcp
|
||||
cp .env.example ~/.unraid-mcp/.env && chmod 600 ~/.unraid-mcp/.env
|
||||
# Edit ~/.unraid-mcp/.env with your values
|
||||
|
||||
# For local development only
|
||||
cp .env.example .env
|
||||
# Edit .env with your Unraid API details
|
||||
```
|
||||
|
||||
### 3. Deploy with Docker (Recommended)
|
||||
@@ -130,15 +134,13 @@ unraid-mcp/ # ${CLAUDE_PLUGIN_ROOT}
|
||||
├── .claude-plugin/
|
||||
│ ├── marketplace.json # Marketplace catalog
|
||||
│ └── plugin.json # Plugin manifest
|
||||
├── commands/ # 10 custom slash commands
|
||||
├── unraid_mcp/ # MCP server Python package
|
||||
├── skills/unraid/ # Skill and documentation
|
||||
├── pyproject.toml # Dependencies and entry points
|
||||
└── scripts/ # Validation and helper scripts
|
||||
```
|
||||
|
||||
- **MCP Server**: 11 tools with ~104 actions via GraphQL API
|
||||
- **Slash Commands**: 10 commands in `commands/` for quick CLI-style access
|
||||
- **MCP Server**: 3 tools — `unraid` (107 subactions) + `diagnose_subscriptions` + `test_subscription_query`
|
||||
- **Skill**: `/unraid` skill for monitoring and queries
|
||||
- **Entry Point**: `unraid-mcp-server` defined in pyproject.toml
|
||||
|
||||
@@ -226,8 +228,12 @@ UNRAID_MCP_LOG_FILE=unraid-mcp.log
|
||||
# SSL/TLS Configuration
|
||||
UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle
|
||||
|
||||
# Subscription Configuration
|
||||
UNRAID_AUTO_START_SUBSCRIPTIONS=true # Auto-start WebSocket subscriptions on startup (default: true)
|
||||
UNRAID_MAX_RECONNECT_ATTEMPTS=10 # Max WebSocket reconnection attempts (default: 10)
|
||||
|
||||
# Optional: Log Stream Configuration
|
||||
# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog # Path for log streaming resource
|
||||
# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog # Override log path for unraid://logs/stream (auto-detects /var/log/syslog if unset)
|
||||
```
|
||||
|
||||
### Transport Options
|
||||
@@ -240,88 +246,104 @@ UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Available Tools & Resources
|
||||
## 🔐 Authentication (Optional)
|
||||
|
||||
Each tool uses a consolidated `action` parameter to expose multiple operations, reducing context window usage. Destructive actions require `confirm=True`.
|
||||
Two independent auth methods — use either or both.
|
||||
|
||||
### Tool Categories (11 Tools, ~104 Actions)
|
||||
### Google OAuth
|
||||
|
||||
| Tool | Actions | Description |
|
||||
|------|---------|-------------|
|
||||
| **`unraid_info`** | 21 | overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config, update_server, update_ssh |
|
||||
| **`unraid_array`** | 5 | parity_start, parity_pause, parity_resume, parity_cancel, parity_status |
|
||||
| **`unraid_storage`** | 7 | shares, disks, disk_details, unassigned, log_files, logs, flash_backup |
|
||||
| **`unraid_docker`** | 26 | list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates, create_folder, set_folder_children, delete_entries, move_to_folder, move_to_position, rename_folder, create_folder_with_items, update_view_prefs, sync_templates, reset_template_mappings, refresh_digests |
|
||||
| **`unraid_vm`** | 9 | list, details, start, stop, pause, resume, force_stop, reboot, reset |
|
||||
| **`unraid_notifications`** | 14 | overview, list, warnings, create, create_unique, archive, archive_many, unread, unarchive_many, unarchive_all, recalculate, delete, delete_archived, archive_all |
|
||||
| **`unraid_rclone`** | 4 | list_remotes, config_form, create_remote, delete_remote |
|
||||
| **`unraid_users`** | 1 | me |
|
||||
| **`unraid_keys`** | 5 | list, get, create, update, delete |
|
||||
| **`unraid_health`** | 3 | check, test_connection, diagnose |
|
||||
| **`unraid_settings`** | 9 | update, update_temperature, update_time, configure_ups, update_api, connect_sign_in, connect_sign_out, setup_remote_access, enable_dynamic_remote_access |
|
||||
Protect the HTTP server with Google OAuth 2.0 — clients must complete a Google login before any tool call is executed.
|
||||
|
||||
### MCP Resources (Real-time Data)
|
||||
- `unraid://logs/stream` - Live log streaming from `/var/log/syslog` with WebSocket subscriptions
|
||||
```bash
|
||||
# Add to ~/.unraid-mcp/.env
|
||||
GOOGLE_CLIENT_ID=your-client-id.apps.googleusercontent.com
|
||||
GOOGLE_CLIENT_SECRET=GOCSPX-your-secret
|
||||
UNRAID_MCP_BASE_URL=http://10.1.0.2:6970 # public URL of this server
|
||||
UNRAID_MCP_JWT_SIGNING_KEY=<64-char-hex> # prevents token invalidation on restart
|
||||
```
|
||||
|
||||
> **Note**: MCP Resources provide real-time data streams that can be accessed via MCP clients. The log stream resource automatically connects to your Unraid system logs and provides live updates.
|
||||
**Quick setup:**
|
||||
1. [Google Cloud Console](https://console.cloud.google.com/) → Credentials → OAuth 2.0 Client ID (Web application)
|
||||
2. Authorized redirect URI: `<UNRAID_MCP_BASE_URL>/auth/callback`
|
||||
3. Copy Client ID + Secret into `~/.unraid-mcp/.env`
|
||||
4. Generate a signing key: `python3 -c "import secrets; print(secrets.token_hex(32))"`
|
||||
5. Restart the server
|
||||
|
||||
### API Key (Bearer Token)
|
||||
|
||||
Simpler option for headless/machine access — no browser flow required:
|
||||
|
||||
```bash
|
||||
# Add to ~/.unraid-mcp/.env
|
||||
UNRAID_MCP_API_KEY=your-secret-token # can be same value as UNRAID_API_KEY
|
||||
```
|
||||
|
||||
Clients present it as `Authorization: Bearer <UNRAID_MCP_API_KEY>`. Set both `GOOGLE_CLIENT_ID` and `UNRAID_MCP_API_KEY` to accept either method simultaneously.
|
||||
|
||||
Omit both to run without authentication (default — open server).
|
||||
|
||||
**Full guide:** [`docs/AUTHENTICATION.md`](docs/AUTHENTICATION.md)
|
||||
|
||||
---
|
||||
|
||||
## 💬 Custom Slash Commands
|
||||
## 🛠️ Available Tools & Resources
|
||||
|
||||
The project includes **10 custom slash commands** in `commands/` for quick access to Unraid operations:
|
||||
The single `unraid` tool uses `action` (domain) + `subaction` (operation) routing to expose all operations via one MCP tool, minimizing context window usage. Destructive actions require `confirm=True`.
|
||||
|
||||
### Available Commands
|
||||
### Primary Tool: 15 Domains, 107 Subactions
|
||||
|
||||
| Command | Actions | Quick Access |
|
||||
|---------|---------|--------------|
|
||||
| `/info` | 21 | System information, metrics, configuration |
|
||||
| `/array` | 5 | Parity check management |
|
||||
| `/storage` | 7 | Shares, disks, logs |
|
||||
| `/docker` | 26 | Container management and monitoring |
|
||||
| `/vm` | 9 | Virtual machine lifecycle |
|
||||
| `/notifications` | 14 | Alert management |
|
||||
| `/rclone` | 4 | Cloud storage remotes |
|
||||
| `/users` | 1 | Current user query |
|
||||
| `/keys` | 5 | API key management |
|
||||
| `/health` | 3 | System health checks |
|
||||
Call pattern: `unraid(action="<domain>", subaction="<operation>")`
|
||||
|
||||
### Example Usage
|
||||
| action= | Subactions | Description |
|
||||
|---------|-----------|-------------|
|
||||
| **`system`** | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config | Server info, metrics, network, UPS (18 subactions) |
|
||||
| **`health`** | check, test_connection, diagnose, setup | Health checks, connection test, diagnostics, interactive setup (4 subactions) |
|
||||
| **`array`** | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array, add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats | Parity checks, array state, disk operations (13 subactions) |
|
||||
| **`disk`** | shares, disks, disk_details, log_files, logs, flash_backup | Shares, physical disks, log files (6 subactions) |
|
||||
| **`docker`** | list, details, start, stop, restart, networks, network_details | Container lifecycle and network inspection (7 subactions) |
|
||||
| **`vm`** | list, details, start, stop, pause, resume, force_stop, reboot, reset | Virtual machine lifecycle (9 subactions) |
|
||||
| **`notification`** | overview, list, create, archive, mark_unread, delete, delete_archived, archive_all, archive_many, unarchive_many, unarchive_all, recalculate | System notifications CRUD (12 subactions) |
|
||||
| **`key`** | list, get, create, update, delete, add_role, remove_role | API key management (7 subactions) |
|
||||
| **`plugin`** | list, add, remove | Plugin management (3 subactions) |
|
||||
| **`rclone`** | list_remotes, config_form, create_remote, delete_remote | Cloud storage remote management (4 subactions) |
|
||||
| **`setting`** | update, configure_ups | System settings and UPS config (2 subactions) |
|
||||
| **`customization`** | theme, public_theme, is_initial_setup, sso_enabled, set_theme | Theme and UI customization (5 subactions) |
|
||||
| **`oidc`** | providers, provider, configuration, public_providers, validate_session | OIDC/SSO provider management (5 subactions) |
|
||||
| **`user`** | me | Current authenticated user (1 subaction) |
|
||||
| **`live`** | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, owner, server_status, log_tail, notification_feed | Real-time WebSocket subscription snapshots (11 subactions) |
|
||||
|
||||
```bash
|
||||
# System monitoring
|
||||
/info overview
|
||||
/health check
|
||||
/storage shares
|
||||
### Destructive Actions (require `confirm=True`)
|
||||
- **array**: `stop_array`, `remove_disk`, `clear_disk_stats`
|
||||
- **vm**: `force_stop`, `reset`
|
||||
- **notification**: `delete`, `delete_archived`
|
||||
- **rclone**: `delete_remote`
|
||||
- **key**: `delete`
|
||||
- **disk**: `flash_backup`
|
||||
- **setting**: `configure_ups`
|
||||
- **plugin**: `remove`
|
||||
|
||||
# Container management
|
||||
/docker list
|
||||
/docker start plex
|
||||
/docker logs nginx
|
||||
### MCP Resources (Real-time Cached Data)
|
||||
|
||||
# VM operations
|
||||
/vm list
|
||||
/vm start windows-10
|
||||
The server exposes two classes of MCP resources backed by persistent WebSocket connections:
|
||||
|
||||
# Notifications
|
||||
/notifications warnings
|
||||
/notifications archive_all
|
||||
**`unraid://live/*` — 9 snapshot resources** (auto-started, always-cached):
|
||||
- `unraid://live/cpu` — CPU utilization
|
||||
- `unraid://live/memory` — Memory usage
|
||||
- `unraid://live/cpu_telemetry` — Detailed CPU telemetry
|
||||
- `unraid://live/array_state` — Array state changes
|
||||
- `unraid://live/parity_progress` — Parity check progress
|
||||
- `unraid://live/ups_status` — UPS status
|
||||
- `unraid://live/notifications_overview` — Notification counts
|
||||
- `unraid://live/owner` — Owner info changes
|
||||
- `unraid://live/server_status` — Server status changes
|
||||
|
||||
# User management
|
||||
/users list
|
||||
/keys create "Automation Key" "For CI/CD"
|
||||
```
|
||||
**`unraid://logs/stream`** — Live log file tail (path controlled by `UNRAID_AUTOSTART_LOG_PATH`)
|
||||
|
||||
### Command Features
|
||||
> **Note**: Resources return cached data from persistent WebSocket subscriptions. A `{"status": "connecting"}` placeholder is returned while the subscription initializes — retry in a moment.
|
||||
>
|
||||
> **`log_tail` and `notification_feed`** are accessible as tool subactions (`unraid(action="live", subaction="log_tail")`) but are not registered as MCP resources — they use transient one-shot subscriptions and require parameters.
|
||||
|
||||
Each slash command provides:
|
||||
- **Comprehensive documentation** of all available actions
|
||||
- **Argument hints** for required parameters
|
||||
- **Safety warnings** for destructive operations (⚠️)
|
||||
- **Usage examples** for common scenarios
|
||||
- **Action categorization** (Query, Lifecycle, Management, Destructive)
|
||||
|
||||
Run any command without arguments to see full documentation, or type `/help` to list all available commands.
|
||||
> **Security note**: The `disk/logs` and `live/log_tail` subactions allow reading files under `/var/log/` and `/boot/logs/` on the Unraid server. Authenticated MCP clients can stream any log file within these directories.
|
||||
|
||||
---
|
||||
|
||||
@@ -333,32 +355,43 @@ Run any command without arguments to see full documentation, or type `/help` to
|
||||
unraid-mcp/
|
||||
├── unraid_mcp/ # Main package
|
||||
│ ├── main.py # Entry point
|
||||
│ ├── server.py # FastMCP server setup
|
||||
│ ├── version.py # Version management (importlib.metadata)
|
||||
│ ├── config/ # Configuration management
|
||||
│ │ ├── settings.py # Environment & settings
|
||||
│ │ └── logging.py # Logging setup
|
||||
│ ├── core/ # Core infrastructure
|
||||
│ │ ├── client.py # GraphQL client
|
||||
│ │ ├── exceptions.py # Custom exceptions
|
||||
│ │ └── types.py # Shared data types
|
||||
│ │ ├── guards.py # Destructive action guards
|
||||
│ │ ├── setup.py # Interactive credential setup
|
||||
│ │ ├── types.py # Shared data types
|
||||
│ │ └── utils.py # Utility functions
|
||||
│ ├── subscriptions/ # Real-time subscriptions
|
||||
│ │ ├── manager.py # WebSocket management
|
||||
│ │ ├── resources.py # MCP resources
|
||||
│ │ └── diagnostics.py # Diagnostic tools
|
||||
│ ├── tools/ # MCP tool categories (11 tools, ~104 actions)
|
||||
│ │ ├── info.py # System information (21 actions)
|
||||
│ │ ├── array.py # Parity checks (5 actions)
|
||||
│ │ ├── storage.py # Storage & monitoring (7 actions)
|
||||
│ │ ├── docker.py # Container management (26 actions)
|
||||
│ │ ├── virtualization.py # VM management (9 actions)
|
||||
│ │ ├── notifications.py # Notification management (14 actions)
|
||||
│ │ ├── rclone.py # Cloud storage (4 actions)
|
||||
│ │ ├── users.py # Current user query (1 action)
|
||||
│ │ ├── keys.py # API key management (5 actions)
|
||||
│ │ ├── settings.py # Server settings (9 actions)
|
||||
│ │ └── health.py # Health checks (3 actions)
|
||||
│ └── server.py # FastMCP server setup
|
||||
├── logs/ # Log files (auto-created)
|
||||
└── docker-compose.yml # Docker Compose deployment
|
||||
│ │ ├── manager.py # Persistent WebSocket manager
|
||||
│ │ ├── resources.py # MCP resources (unraid://live/*)
|
||||
│ │ ├── snapshot.py # Transient subscribe_once helpers
|
||||
│ │ ├── queries.py # Subscription query constants
|
||||
│ │ ├── diagnostics.py # Diagnostic tools
|
||||
│ │ └── utils.py # Subscription utility functions
|
||||
│ └── tools/ # Consolidated tools (unraid: 107 subactions + 2 diagnostic tools)
|
||||
│ └── unraid.py # All 15 domains in one file
|
||||
├── tests/ # Test suite
|
||||
│ ├── conftest.py # Shared fixtures
|
||||
│ ├── test_*.py # Unit tests (per domain)
|
||||
│ ├── http_layer/ # httpx-level request tests
|
||||
│ ├── integration/ # WebSocket lifecycle tests
|
||||
│ ├── safety/ # Destructive action guard tests
|
||||
│ └── schema/ # GraphQL query validation
|
||||
├── docs/ # Documentation & API references
|
||||
├── scripts/ # Build and utility scripts
|
||||
├── skills/unraid/ # Claude skill assets
|
||||
├── .claude-plugin/ # Plugin manifest & marketplace config
|
||||
├── .env.example # Environment template
|
||||
├── Dockerfile # Container image definition
|
||||
├── docker-compose.yml # Docker Compose deployment
|
||||
├── pyproject.toml # Project config & dependencies
|
||||
└── logs/ # Log files (auto-created, gitignored)
|
||||
```
|
||||
|
||||
### Code Quality Commands
|
||||
@@ -409,6 +442,28 @@ uv run unraid-mcp-server
|
||||
|
||||
# Or run via module directly
|
||||
uv run -m unraid_mcp.main
|
||||
|
||||
# Hot-reload dev server (restarts on file changes)
|
||||
fastmcp run fastmcp.http.json --reload
|
||||
|
||||
# Run via named config files
|
||||
fastmcp run fastmcp.http.json # streamable-http on :6970
|
||||
fastmcp run fastmcp.stdio.json # stdio transport
|
||||
```
|
||||
|
||||
### Ad-hoc Tool Testing (fastmcp CLI)
|
||||
```bash
|
||||
# Introspect the running server
|
||||
fastmcp list http://localhost:6970/mcp
|
||||
fastmcp list http://localhost:6970/mcp --input-schema
|
||||
|
||||
# Call a tool directly (HTTP)
|
||||
fastmcp call http://localhost:6970/mcp unraid action=health subaction=check
|
||||
fastmcp call http://localhost:6970/mcp unraid action=docker subaction=list
|
||||
|
||||
# Call without a running server (stdio config)
|
||||
fastmcp list fastmcp.stdio.json
|
||||
fastmcp call fastmcp.stdio.json unraid action=health subaction=check
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid array parity checks
|
||||
argument-hint: [action] [correct=true/false]
|
||||
---
|
||||
|
||||
Execute the `unraid_array` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (5)
|
||||
|
||||
**Parity Check Operations:**
|
||||
- `parity_start` - Start parity check/sync (optional: correct=true to fix errors)
|
||||
- `parity_pause` - Pause running parity operation
|
||||
- `parity_resume` - Resume paused parity operation
|
||||
- `parity_cancel` - Cancel running parity operation
|
||||
- `parity_status` - Get current parity check status
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/array parity_start
|
||||
/array parity_start correct=true
|
||||
/array parity_pause
|
||||
/array parity_resume
|
||||
/array parity_cancel
|
||||
/array parity_status
|
||||
```
|
||||
|
||||
**Note:** Use `correct=true` with `parity_start` to automatically fix any parity errors found during the check.
|
||||
|
||||
Use the tool to execute the requested parity operation and report the results.
|
||||
@@ -1,48 +0,0 @@
|
||||
---
|
||||
description: Manage Docker containers on Unraid
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_docker` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (15)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all Docker containers with status
|
||||
- `details` - Get detailed info for a container (requires container identifier)
|
||||
- `logs` - Get container logs (requires container identifier)
|
||||
- `check_updates` - Check for available container updates
|
||||
- `port_conflicts` - Identify port conflicts
|
||||
- `networks` - List Docker networks
|
||||
- `network_details` - Get network details (requires network identifier)
|
||||
|
||||
**Container Lifecycle:**
|
||||
- `start` - Start a stopped container (requires container identifier)
|
||||
- `stop` - Stop a running container (requires container identifier)
|
||||
- `restart` - Restart a container (requires container identifier)
|
||||
- `pause` - Pause a running container (requires container identifier)
|
||||
- `unpause` - Unpause a paused container (requires container identifier)
|
||||
|
||||
**Updates & Management:**
|
||||
- `update` - Update a specific container (requires container identifier)
|
||||
- `update_all` - Update all containers with available updates
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `remove` - Permanently delete a container (requires container identifier + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-docker list
|
||||
/unraid-docker details plex
|
||||
/unraid-docker logs plex
|
||||
/unraid-docker start nginx
|
||||
/unraid-docker restart sonarr
|
||||
/unraid-docker check_updates
|
||||
/unraid-docker update plex
|
||||
/unraid-docker port_conflicts
|
||||
```
|
||||
|
||||
**Container Identification:** Use container name, ID, or partial match (fuzzy search supported)
|
||||
|
||||
Use the tool to execute the requested Docker operation and report the results.
|
||||
@@ -1,59 +0,0 @@
|
||||
---
|
||||
description: Check Unraid system health and connectivity
|
||||
argument-hint: [action]
|
||||
---
|
||||
|
||||
Execute the `unraid_health` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (3)
|
||||
|
||||
**Health Monitoring:**
|
||||
- `check` - Comprehensive health check of all system components
|
||||
- `test_connection` - Test basic API connectivity
|
||||
- `diagnose` - Detailed diagnostic information for troubleshooting
|
||||
|
||||
## What Each Action Checks
|
||||
|
||||
### `check` - System Health
|
||||
- API connectivity and response time
|
||||
- Array status and disk health
|
||||
- Running services status
|
||||
- Docker container health
|
||||
- VM status
|
||||
- System resources (CPU, RAM, disk I/O)
|
||||
- Network connectivity
|
||||
- UPS status (if configured)
|
||||
|
||||
Returns: Overall health status (`HEALTHY`, `WARNING`, `CRITICAL`) with component details
|
||||
|
||||
### `test_connection` - Connectivity
|
||||
- GraphQL endpoint availability
|
||||
- Authentication validity
|
||||
- Basic query execution
|
||||
- Network latency
|
||||
|
||||
Returns: Connection status and latency metrics
|
||||
|
||||
### `diagnose` - Diagnostic Details
|
||||
- Full system configuration
|
||||
- Resource utilization trends
|
||||
- Error logs and warnings
|
||||
- Component-level diagnostics
|
||||
- Troubleshooting recommendations
|
||||
|
||||
Returns: Detailed diagnostic report
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-health check
|
||||
/unraid-health test_connection
|
||||
/unraid-health diagnose
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- `check` - Quick health status (monitoring dashboards)
|
||||
- `test_connection` - Verify API access (troubleshooting)
|
||||
- `diagnose` - Deep dive debugging (issue resolution)
|
||||
|
||||
Use the tool to execute the requested health check and present results with clear severity indicators.
|
||||
@@ -1,50 +0,0 @@
|
||||
---
|
||||
description: Query Unraid server information and configuration
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_info` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (19)
|
||||
|
||||
**System Overview:**
|
||||
- `overview` - Complete system summary with all key metrics
|
||||
- `server` - Server details (hostname, version, uptime)
|
||||
- `servers` - List all known Unraid servers
|
||||
|
||||
**Array & Storage:**
|
||||
- `array` - Array status, disks, and health
|
||||
|
||||
**Network & Registration:**
|
||||
- `network` - Network configuration and interfaces
|
||||
- `registration` - Registration status and license info
|
||||
- `connect` - Connect service configuration
|
||||
- `online` - Online status check
|
||||
|
||||
**Configuration:**
|
||||
- `config` - System configuration settings
|
||||
- `settings` - User settings and preferences
|
||||
- `variables` - Environment variables
|
||||
- `display` - Display settings
|
||||
|
||||
**Services & Monitoring:**
|
||||
- `services` - Running services status
|
||||
- `metrics` - System metrics (CPU, RAM, disk I/O)
|
||||
- `ups_devices` - List all UPS devices
|
||||
- `ups_device` - Get specific UPS device details (requires device_id)
|
||||
- `ups_config` - UPS configuration
|
||||
|
||||
**Ownership:**
|
||||
- `owner` - Server owner information
|
||||
- `flash` - USB flash drive details
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-info overview
|
||||
/unraid-info array
|
||||
/unraid-info metrics
|
||||
/unraid-info ups_device [device-id]
|
||||
```
|
||||
|
||||
Use the tool to retrieve the requested information and present it in a clear, formatted manner.
|
||||
@@ -1,37 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid API keys for authentication
|
||||
argument-hint: [action] [key-id]
|
||||
---
|
||||
|
||||
Execute the `unraid_keys` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (5)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all API keys with metadata
|
||||
- `get` - Get details for a specific API key (requires key_id)
|
||||
|
||||
**Management Operations:**
|
||||
- `create` - Create a new API key (requires name, optional description and expiry)
|
||||
- `update` - Update an existing API key (requires key_id, name, description)
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `delete` - Permanently revoke an API key (requires key_id + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-keys list
|
||||
/unraid-keys get [key-id]
|
||||
/unraid-keys create "MCP Server Key" "Key for unraid-mcp integration"
|
||||
/unraid-keys update [key-id] "Updated Name" "Updated description"
|
||||
```
|
||||
|
||||
**Key Format:** PrefixedID (`hex64:suffix`)
|
||||
|
||||
**IMPORTANT:**
|
||||
- Deleted keys are immediately revoked and cannot be recovered
|
||||
- Store new keys securely - they're only shown once during creation
|
||||
- Set expiry dates for keys used in automation
|
||||
|
||||
Use the tool to execute the requested API key operation and report the results.
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid system notifications and alerts
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_notifications` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (9)
|
||||
|
||||
**Query Operations:**
|
||||
- `overview` - Summary of notification counts by category
|
||||
- `list` - List all notifications with details
|
||||
- `warnings` - List only warning/error notifications
|
||||
- `unread` - List unread notifications only
|
||||
|
||||
**Management Operations:**
|
||||
- `create` - Create a new notification (requires title, message, severity)
|
||||
- `archive` - Archive a specific notification (requires notification_id)
|
||||
- `archive_all` - Archive all current notifications
|
||||
|
||||
**⚠️ Destructive Operations:**
|
||||
- `delete` - Permanently delete a notification (requires notification_id + confirmation)
|
||||
- `delete_archived` - Permanently delete all archived notifications (requires confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-notifications overview
|
||||
/unraid-notifications list
|
||||
/unraid-notifications warnings
|
||||
/unraid-notifications unread
|
||||
/unraid-notifications create "Test Alert" "This is a test" normal
|
||||
/unraid-notifications archive [notification-id]
|
||||
/unraid-notifications archive_all
|
||||
```
|
||||
|
||||
**Severity Levels:** `normal`, `warning`, `alert`, `critical`
|
||||
|
||||
**IMPORTANT:** Delete operations are permanent and cannot be undone.
|
||||
|
||||
Use the tool to execute the requested notification operation and present results clearly.
|
||||
@@ -1,32 +0,0 @@
|
||||
---
|
||||
description: Manage Rclone cloud storage remotes on Unraid
|
||||
argument-hint: [action] [remote-name]
|
||||
---
|
||||
|
||||
Execute the `unraid_rclone` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (4)
|
||||
|
||||
**Query Operations:**
|
||||
- `list_remotes` - List all configured Rclone remotes
|
||||
- `config_form` - Get configuration form for a remote type (requires remote_type)
|
||||
|
||||
**Management Operations:**
|
||||
- `create_remote` - Create a new Rclone remote (requires remote_name, remote_type, config)
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `delete_remote` - Permanently delete a remote (requires remote_name + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-rclone list_remotes
|
||||
/unraid-rclone config_form s3
|
||||
/unraid-rclone create_remote mybackup s3 {"access_key":"...","secret_key":"..."}
|
||||
```
|
||||
|
||||
**Supported Remote Types:** s3, dropbox, google-drive, onedrive, backblaze, ftp, sftp, webdav, etc.
|
||||
|
||||
**IMPORTANT:** Deleting a remote does NOT delete cloud data, only the local configuration.
|
||||
|
||||
Use the tool to execute the requested Rclone operation and report the results.
|
||||
@@ -1,49 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid system settings and configuration
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_settings` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (9)
|
||||
|
||||
All settings actions are mutations that modify server configuration.
|
||||
|
||||
**General Settings:**
|
||||
- `update` - Update general system settings (timezone, locale, etc.)
|
||||
- `update_temperature` - Update temperature unit preference (Celsius/Fahrenheit)
|
||||
- `update_time` - Update NTP and time configuration
|
||||
|
||||
**UPS Configuration:**
|
||||
- `configure_ups` - Configure UPS settings (requires `confirm=True` — DESTRUCTIVE)
|
||||
|
||||
**API & Connectivity:**
|
||||
- `update_api` - Update Unraid Connect API settings
|
||||
|
||||
**Unraid Connect (My Servers):**
|
||||
- `connect_sign_in` - Sign in to Unraid Connect cloud service
|
||||
- `connect_sign_out` - Sign out of Unraid Connect cloud service
|
||||
|
||||
**Remote Access:**
|
||||
- `setup_remote_access` - Configure remote access settings (requires `confirm=True` — DESTRUCTIVE)
|
||||
- `enable_dynamic_remote_access` - Enable/configure dynamic remote access (requires `confirm=True` — DESTRUCTIVE)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-settings update
|
||||
/unraid-settings update_temperature
|
||||
/unraid-settings update_time
|
||||
/unraid-settings update_api
|
||||
/unraid-settings connect_sign_in
|
||||
/unraid-settings connect_sign_out
|
||||
```
|
||||
|
||||
**⚠️ Destructive Operations (require `confirm=True`):**
|
||||
- `configure_ups` - Modifies UPS hardware configuration
|
||||
- `setup_remote_access` - Changes network access policies
|
||||
- `enable_dynamic_remote_access` - Changes network access policies
|
||||
|
||||
**IMPORTANT:** Settings changes take effect immediately and may affect server accessibility.
|
||||
|
||||
Use the tool to execute the requested settings operation and report the results.
|
||||
@@ -1,33 +0,0 @@
|
||||
---
|
||||
description: Query Unraid storage, shares, and disk information
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_storage` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (6)
|
||||
|
||||
**Shares & Disks:**
|
||||
- `shares` - List all user shares with sizes and allocation
|
||||
- `disks` - List all disks in the array
|
||||
- `disk_details` - Get detailed info for a specific disk (requires disk identifier)
|
||||
- `unassigned` - List unassigned devices
|
||||
|
||||
**Logs:**
|
||||
- `log_files` - List available system log files
|
||||
- `logs` - Read log file contents (requires log file path)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-storage shares
|
||||
/unraid-storage disks
|
||||
/unraid-storage disk_details disk1
|
||||
/unraid-storage unassigned
|
||||
/unraid-storage log_files
|
||||
/unraid-storage logs /var/log/syslog
|
||||
```
|
||||
|
||||
**Note:** Log file paths must start with `/var/log/`, `/boot/logs/`, or `/mnt/`
|
||||
|
||||
Use the tool to retrieve the requested storage information and present it clearly.
|
||||
@@ -1,31 +0,0 @@
|
||||
---
|
||||
description: Query current authenticated Unraid user
|
||||
argument-hint: [action]
|
||||
---
|
||||
|
||||
Execute the `unraid_users` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (1)
|
||||
|
||||
**Query Operation:**
|
||||
- `me` - Get current authenticated user info (id, name, description, roles)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/users me
|
||||
```
|
||||
|
||||
## API Limitation
|
||||
|
||||
⚠️ **Note:** The Unraid GraphQL API does not support user management operations. Only the `me` query is available, which returns information about the currently authenticated user (the API key holder).
|
||||
|
||||
**Not supported:**
|
||||
- Listing all users
|
||||
- Getting other user details
|
||||
- Adding/deleting users
|
||||
- Cloud/remote access queries
|
||||
|
||||
For user management, use the Unraid web UI.
|
||||
|
||||
Use the tool to query the current authenticated user and report the results.
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
description: Manage virtual machines on Unraid
|
||||
argument-hint: [action] [vm-id]
|
||||
---
|
||||
|
||||
Execute the `unraid_vm` MCP tool with action: `$1` and vm_id: `$2`
|
||||
|
||||
## Available Actions (9)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all VMs with status and resource allocation
|
||||
- `details` - Get detailed info for a VM (requires vm_id)
|
||||
|
||||
**Lifecycle Operations:**
|
||||
- `start` - Start a stopped VM (requires vm_id)
|
||||
- `stop` - Gracefully stop a running VM (requires vm_id)
|
||||
- `pause` - Pause a running VM (requires vm_id)
|
||||
- `resume` - Resume a paused VM (requires vm_id)
|
||||
- `reboot` - Gracefully reboot a VM (requires vm_id)
|
||||
|
||||
**⚠️ Destructive Operations:**
|
||||
- `force_stop` - Forcefully power off VM (like pulling power cord - requires vm_id + confirmation)
|
||||
- `reset` - Hard reset VM (power cycle without graceful shutdown - requires vm_id + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-vm list
|
||||
/unraid-vm details windows-10
|
||||
/unraid-vm start ubuntu-server
|
||||
/unraid-vm stop windows-10
|
||||
/unraid-vm pause debian-vm
|
||||
/unraid-vm resume debian-vm
|
||||
/unraid-vm reboot ubuntu-server
|
||||
```
|
||||
|
||||
**VM Identification:** Use VM ID (PrefixedID format: `hex64:suffix`)
|
||||
|
||||
**IMPORTANT:** `force_stop` and `reset` bypass graceful shutdown and may corrupt VM filesystem. Use `stop` instead for safe shutdowns.
|
||||
|
||||
Use the tool to execute the requested VM operation and report the results.
|
||||
188
docs/AUTHENTICATION.md
Normal file
188
docs/AUTHENTICATION.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# Authentication Setup Guide
|
||||
|
||||
This document covers both Google OAuth 2.0 and API key bearer token authentication for the Unraid MCP HTTP server. It explains how to protect the server using FastMCP's built-in `GoogleProvider` for OAuth, or a static bearer token for headless/machine access.
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
By default the MCP server is **open** — any client on the network can call tools. Setting three environment variables enables Google OAuth 2.0 authentication: clients must complete a Google login flow before the server will execute any tool.
|
||||
|
||||
OAuth state (issued tokens, refresh tokens) is persisted to an encrypted file store at `~/.local/share/fastmcp/oauth-proxy/`, so sessions survive server restarts when `UNRAID_MCP_JWT_SIGNING_KEY` is set.
|
||||
|
||||
> **Transport requirement**: OAuth only works with HTTP transports (`streamable-http` or `sse`). It has no effect on `stdio` — the server logs a warning if you configure both.
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Google account with access to [Google Cloud Console](https://console.cloud.google.com/)
|
||||
- MCP server reachable at a known URL from your browser (LAN IP, Tailscale IP, or public domain)
|
||||
- `UNRAID_MCP_TRANSPORT=streamable-http` (the default)
|
||||
|
||||
---
|
||||
|
||||
## Step 1: Create a Google OAuth Client
|
||||
|
||||
1. Open [Google Cloud Console](https://console.cloud.google.com/) → **APIs & Services** → **Credentials**
|
||||
2. Click **Create Credentials** → **OAuth 2.0 Client ID**
|
||||
3. Application type: **Web application**
|
||||
4. Name: anything (e.g. `Unraid MCP`)
|
||||
5. **Authorized redirect URIs** — add exactly:
|
||||
```
|
||||
http://<your-server-ip>:6970/auth/callback
|
||||
```
|
||||
Replace `<your-server-ip>` with the IP/hostname your browser uses to reach the MCP server (e.g. `10.1.0.2`, `100.x.x.x` for Tailscale, or a domain name).
|
||||
6. Click **Create** — copy the **Client ID** and **Client Secret**
|
||||
|
||||
---
|
||||
|
||||
## Step 2: Configure Environment Variables
|
||||
|
||||
Add these to `~/.unraid-mcp/.env` (the canonical credential file for all runtimes):
|
||||
|
||||
```bash
|
||||
# Google OAuth (optional — enables authentication)
|
||||
GOOGLE_CLIENT_ID=your-client-id.apps.googleusercontent.com
|
||||
GOOGLE_CLIENT_SECRET=GOCSPX-your-client-secret
|
||||
|
||||
# Public base URL of this MCP server (must match the redirect URI above)
|
||||
UNRAID_MCP_BASE_URL=http://10.1.0.2:6970
|
||||
|
||||
# Stable JWT signing key — prevents token invalidation on server restart
|
||||
# Generate one: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
UNRAID_MCP_JWT_SIGNING_KEY=your-64-char-hex-string
|
||||
```
|
||||
|
||||
**All four variables at once** (copy-paste template):
|
||||
|
||||
```bash
|
||||
cat >> ~/.unraid-mcp/.env <<'EOF'
|
||||
|
||||
# Google OAuth
|
||||
GOOGLE_CLIENT_ID=
|
||||
GOOGLE_CLIENT_SECRET=
|
||||
UNRAID_MCP_BASE_URL=http://10.1.0.2:6970
|
||||
UNRAID_MCP_JWT_SIGNING_KEY=
|
||||
EOF
|
||||
```
|
||||
|
||||
Then fill in the blanks.
|
||||
|
||||
---
|
||||
|
||||
## Step 3: Generate a Stable JWT Signing Key
|
||||
|
||||
Without `UNRAID_MCP_JWT_SIGNING_KEY`, FastMCP derives a key on startup. Any server restart invalidates all existing tokens and forces every client to re-authenticate.
|
||||
|
||||
Generate a stable key once:
|
||||
|
||||
```bash
|
||||
python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
```
|
||||
|
||||
Paste the output into `UNRAID_MCP_JWT_SIGNING_KEY`. This value never needs to change unless you intentionally want to invalidate all sessions.
|
||||
|
||||
---
|
||||
|
||||
## Step 4: Restart the Server
|
||||
|
||||
```bash
|
||||
# Docker Compose
|
||||
docker compose restart unraid-mcp
|
||||
|
||||
# Direct / uv
|
||||
uv run unraid-mcp-server
|
||||
```
|
||||
|
||||
On startup you should see:
|
||||
|
||||
```
|
||||
INFO [SERVER] Google OAuth enabled — base_url=http://10.1.0.2:6970, redirect_uri=http://10.1.0.2:6970/auth/callback
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How Authentication Works
|
||||
|
||||
1. An MCP client connects to `http://<server>:6970/mcp`
|
||||
2. The server responds with a `401 Unauthorized` and an OAuth authorization URL
|
||||
3. The client opens the URL in a browser; the user logs in with Google
|
||||
4. Google redirects to `<UNRAID_MCP_BASE_URL>/auth/callback` with an authorization code
|
||||
5. FastMCP exchanges the code for tokens, issues a signed JWT, and returns it to the client
|
||||
6. The client includes the JWT in subsequent requests — the server validates it without hitting Google again
|
||||
7. Tokens persist to `~/.local/share/fastmcp/oauth-proxy/` — sessions survive server restarts
|
||||
|
||||
---
|
||||
|
||||
## Environment Variable Reference
|
||||
|
||||
| Variable | Required | Default | Description |
|
||||
|----------|----------|---------|-------------|
|
||||
| `GOOGLE_CLIENT_ID` | For OAuth | `""` | OAuth 2.0 Client ID from Google Cloud Console |
|
||||
| `GOOGLE_CLIENT_SECRET` | For OAuth | `""` | OAuth 2.0 Client Secret from Google Cloud Console |
|
||||
| `UNRAID_MCP_BASE_URL` | For OAuth | `""` | Public base URL of this server — must match the authorized redirect URI |
|
||||
| `UNRAID_MCP_JWT_SIGNING_KEY` | Recommended | auto-derived | Stable 32+ char secret for JWT signing — prevents token invalidation on restart |
|
||||
|
||||
OAuth is activated only when **all three** of `GOOGLE_CLIENT_ID`, `GOOGLE_CLIENT_SECRET`, and `UNRAID_MCP_BASE_URL` are non-empty. Omit any one to run without authentication.
|
||||
|
||||
---
|
||||
|
||||
## Disabling OAuth
|
||||
|
||||
Remove (or empty) `GOOGLE_CLIENT_ID` from `~/.unraid-mcp/.env` and restart. The server reverts to unauthenticated mode and logs:
|
||||
|
||||
```
|
||||
WARNING [SERVER] No authentication configured — MCP server is open to all clients on the network.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**`redirect_uri_mismatch` from Google**
|
||||
The redirect URI in Google Cloud Console must exactly match `<UNRAID_MCP_BASE_URL>/auth/callback` — same scheme, host, port, and path. Trailing slashes matter.
|
||||
|
||||
**Tokens invalidated after restart**
|
||||
Set `UNRAID_MCP_JWT_SIGNING_KEY` to a stable secret (see Step 3). Without it, FastMCP generates a new key on every start.
|
||||
|
||||
**`stdio` transport warning**
|
||||
OAuth requires an HTTP transport. Set `UNRAID_MCP_TRANSPORT=streamable-http` (the default) or `sse`.
|
||||
|
||||
**Client cannot reach the callback URL**
|
||||
`UNRAID_MCP_BASE_URL` must be the address your browser uses to reach the server — not `localhost` or `0.0.0.0`. Use the LAN IP, Tailscale IP, or a domain name.
|
||||
|
||||
**OAuth configured but server not starting**
|
||||
Check `logs/unraid-mcp.log` or `docker compose logs unraid-mcp` for startup errors.
|
||||
|
||||
---
|
||||
|
||||
## API Key Authentication (Alternative / Combined)
|
||||
|
||||
For machine-to-machine access (scripts, CI, other agents) without a browser-based OAuth flow, set `UNRAID_MCP_API_KEY`:
|
||||
|
||||
```bash
|
||||
# In ~/.unraid-mcp/.env
|
||||
UNRAID_MCP_API_KEY=your-secret-token
|
||||
```
|
||||
|
||||
Clients present it as a standard bearer token:
|
||||
|
||||
```
|
||||
Authorization: Bearer your-secret-token
|
||||
```
|
||||
|
||||
**Combining with Google OAuth**: set both `GOOGLE_CLIENT_ID` and `UNRAID_MCP_API_KEY`. The server activates `MultiAuth` and accepts either method — Google OAuth for interactive clients, API key for headless clients.
|
||||
|
||||
**Reusing the Unraid API key**: you can set `UNRAID_MCP_API_KEY` to the same value as `UNRAID_API_KEY` for simplicity. The two vars are kept separate so each concern has its own name.
|
||||
|
||||
**Standalone API key** (no Google OAuth): set only `UNRAID_MCP_API_KEY`. The server validates bearer tokens directly with no OAuth redirect flow.
|
||||
|
||||
---
|
||||
|
||||
## Security Notes
|
||||
|
||||
- OAuth protects the MCP HTTP interface — the Unraid GraphQL API itself still uses `UNRAID_API_KEY`
|
||||
- OAuth state files at `~/.local/share/fastmcp/oauth-proxy/` should be on a private filesystem; do not expose them
|
||||
- Restrict Google OAuth to specific accounts via the Google Cloud Console **OAuth consent screen** → **Test users** if you don't want to publish the app
|
||||
- `UNRAID_MCP_JWT_SIGNING_KEY` is a credential — store it in `~/.unraid-mcp/.env` (mode 600), never in source control
|
||||
@@ -1,78 +1,52 @@
|
||||
# Destructive Actions
|
||||
|
||||
**Last Updated:** 2026-03-13
|
||||
**Total destructive actions:** 15 across 7 tools
|
||||
**Last Updated:** 2026-03-16
|
||||
**Total destructive actions:** 12 across 8 domains (single `unraid` tool)
|
||||
|
||||
All destructive actions require `confirm=True` at the call site. There is no additional environment variable gate — `confirm` is the sole guard.
|
||||
|
||||
> **mcporter commands below** use `$MCP_URL` (default: `http://localhost:6970/mcp`). Run `test-actions.sh` for automated non-destructive coverage; destructive actions are always skipped there and tested manually per the strategies below.
|
||||
>
|
||||
> **Calling convention (v1.0.0+):** All operations use the single `unraid` tool with `action` (domain) + `subaction` (operation). For example:
|
||||
> `mcporter call --http-url "$MCP_URL" --tool unraid --args '{"action":"docker","subaction":"list"}'`
|
||||
|
||||
---
|
||||
|
||||
## `unraid_docker`
|
||||
## `array`
|
||||
|
||||
### `remove` — Delete a container permanently
|
||||
### `stop_array` — Stop the Unraid array
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Stopping the array unmounts all shares and can interrupt running containers and VMs accessing array data. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless all containers and VMs are shut down first.
|
||||
|
||||
---
|
||||
|
||||
### `remove_disk` — Remove a disk from the array
|
||||
|
||||
```bash
|
||||
# 1. Provision a throwaway canary container
|
||||
docker run -d --name mcp-test-canary alpine sleep 3600
|
||||
# Prerequisite: array must already be stopped; use a disk you intend to remove
|
||||
|
||||
# 2. Discover its MCP-assigned ID
|
||||
CID=$(mcporter call --http-url "$MCP_URL" --tool unraid_docker \
|
||||
--args '{"action":"list"}' --output json \
|
||||
| python3 -c "import json,sys; cs=json.load(sys.stdin).get('containers',[]); print(next(c['id'] for c in cs if 'mcp-test-canary' in c.get('name','')))")
|
||||
|
||||
# 3. Remove via MCP
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_docker \
|
||||
--args "{\"action\":\"remove\",\"container_id\":\"$CID\",\"confirm\":true}" --output json
|
||||
|
||||
# 4. Verify
|
||||
docker ps -a | grep mcp-test-canary # should return nothing
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"array","subaction":"remove_disk","disk_id":"<DISK_ID>","confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `update_all` — Pull latest images and restart all containers
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
No safe live isolation — this hits every running container. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless all containers can tolerate a simultaneous restart.
|
||||
|
||||
---
|
||||
|
||||
### `delete_entries` — Delete Docker organizer folders/entries
|
||||
### `clear_disk_stats` — Clear I/O statistics for a disk (irreversible)
|
||||
|
||||
```bash
|
||||
# 1. Create a throwaway organizer folder
|
||||
# Parameter: folder_name (str); ID is in organizer.views.flatEntries[type==FOLDER]
|
||||
FOLDER=$(mcporter call --http-url "$MCP_URL" --tool unraid_docker \
|
||||
--args '{"action":"create_folder","folder_name":"mcp-test-delete-me"}' --output json)
|
||||
FID=$(echo "$FOLDER" | python3 -c "
|
||||
import json,sys
|
||||
data=json.load(sys.stdin)
|
||||
entries=(data.get('organizer',{}).get('views',{}).get('flatEntries') or [])
|
||||
match=next((e['id'] for e in entries if e.get('type')=='FOLDER' and 'mcp-test' in e.get('name','')),'' )
|
||||
print(match)")
|
||||
# Discover disk IDs
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"disk","subaction":"disks"}' --output json
|
||||
|
||||
# 2. Delete it
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_docker \
|
||||
--args "{\"action\":\"delete_entries\",\"entry_ids\":[\"$FID\"],\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_docker \
|
||||
--args '{"action":"list"}' --output json | python3 -c \
|
||||
"import json,sys; folders=[x for x in json.load(sys.stdin).get('folders',[]) if 'mcp-test' in x.get('name','')]; print('clean' if not folders else folders)"
|
||||
# Clear stats for a specific disk
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"array","subaction":"clear_disk_stats","disk_id":"<DISK_ID>","confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `reset_template_mappings` — Wipe all template-to-container associations
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Global state — wipes all template mappings, requires full remapping afterward. No safe isolation. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`.
|
||||
|
||||
---
|
||||
|
||||
## `unraid_vm`
|
||||
## `vm`
|
||||
|
||||
### `force_stop` — Hard power-off a VM (potential data corruption)
|
||||
|
||||
@@ -80,16 +54,16 @@ Global state — wipes all template mappings, requires full remapping afterward.
|
||||
# Prerequisite: create a minimal Alpine test VM in Unraid VM manager
|
||||
# (Alpine ISO, 512MB RAM, no persistent disk, name contains "mcp-test")
|
||||
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
--args '{"action":"list"}' --output json \
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"vm","subaction":"list"}' --output json \
|
||||
| python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))")
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
--args "{\"action\":\"force_stop\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"force_stop\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
|
||||
# Verify: VM state should return to stopped
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
--args "{\"action\":\"details\",\"vm_id\":\"$VID\"}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"details\",\"vm_id\":\"$VID\"}" --output json
|
||||
```
|
||||
|
||||
---
|
||||
@@ -98,27 +72,27 @@ mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
|
||||
```bash
|
||||
# Same minimal Alpine test VM as above
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
--args '{"action":"list"}' --output json \
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"vm","subaction":"list"}' --output json \
|
||||
| python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))")
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_vm \
|
||||
--args "{\"action\":\"reset\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"reset\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `unraid_notifications`
|
||||
## `notification`
|
||||
|
||||
### `delete` — Permanently delete a notification
|
||||
|
||||
```bash
|
||||
# 1. Create a test notification, then list to get the real stored ID (create response
|
||||
# ID is ULID-based; stored filename uses a unix timestamp, so IDs differ)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"create","title":"mcp-test-delete","subject":"safe to delete","description":"MCP destructive action test","importance":"INFO"}' --output json
|
||||
NID=$(mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"list","notification_type":"UNREAD"}' --output json \
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"create","title":"mcp-test-delete","subject":"safe to delete","description":"MCP destructive action test","importance":"INFO"}' --output json
|
||||
NID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \
|
||||
| python3 -c "
|
||||
import json,sys
|
||||
notifs=json.load(sys.stdin).get('notifications',[])
|
||||
@@ -126,12 +100,12 @@ matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-delete']
|
||||
print(matches[0] if matches else '')")
|
||||
|
||||
# 2. Delete it (notification_type required)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args "{\"action\":\"delete\",\"notification_id\":\"$NID\",\"notification_type\":\"UNREAD\",\"confirm\":true}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"notification\",\"subaction\":\"delete\",\"notification_id\":\"$NID\",\"notification_type\":\"UNREAD\",\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"list"}' --output json | python3 -c \
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list"}' --output json | python3 -c \
|
||||
"import json,sys; ns=[n for n in json.load(sys.stdin).get('notifications',[]) if 'mcp-test' in n.get('title','')]; print('clean' if not ns else ns)"
|
||||
```
|
||||
|
||||
@@ -141,45 +115,45 @@ mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
|
||||
```bash
|
||||
# 1. Create and archive a test notification
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"create","title":"mcp-test-archive-wipe","subject":"archive me","description":"safe to delete","importance":"INFO"}' --output json
|
||||
AID=$(mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"list","notification_type":"UNREAD"}' --output json \
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"create","title":"mcp-test-archive-wipe","subject":"archive me","description":"safe to delete","importance":"INFO"}' --output json
|
||||
AID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \
|
||||
| python3 -c "
|
||||
import json,sys
|
||||
notifs=json.load(sys.stdin).get('notifications',[])
|
||||
matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-archive-wipe']
|
||||
print(matches[0] if matches else '')")
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args "{\"action\":\"archive\",\"notification_id\":\"$AID\"}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"notification\",\"subaction\":\"archive\",\"notification_id\":\"$AID\"}" --output json
|
||||
|
||||
# 2. Wipe all archived
|
||||
# NOTE: this deletes ALL archived notifications, not just the test one
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_notifications \
|
||||
--args '{"action":"delete_archived","confirm":true}' --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"delete_archived","confirm":true}' --output json
|
||||
```
|
||||
|
||||
> Run on `shart` if archival history on `tootie` matters.
|
||||
|
||||
---
|
||||
|
||||
## `unraid_rclone`
|
||||
## `rclone`
|
||||
|
||||
### `delete_remote` — Remove an rclone remote configuration
|
||||
|
||||
```bash
|
||||
# 1. Create a throwaway local remote (points to /tmp — no real data)
|
||||
# Parameters: name (str), provider_type (str), config_data (dict)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_rclone \
|
||||
--args '{"action":"create_remote","name":"mcp-test-remote","provider_type":"local","config_data":{"root":"/tmp"}}' --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"create_remote","name":"mcp-test-remote","provider_type":"local","config_data":{"root":"/tmp"}}' --output json
|
||||
|
||||
# 2. Delete it
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_rclone \
|
||||
--args '{"action":"delete_remote","name":"mcp-test-remote","confirm":true}' --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"delete_remote","name":"mcp-test-remote","confirm":true}' --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_rclone \
|
||||
--args '{"action":"list_remotes"}' --output json | python3 -c \
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"list_remotes"}' --output json | python3 -c \
|
||||
"import json,sys; remotes=json.load(sys.stdin).get('remotes',[]); print('clean' if 'mcp-test-remote' not in remotes else 'FOUND — cleanup failed')"
|
||||
```
|
||||
|
||||
@@ -187,29 +161,29 @@ mcporter call --http-url "$MCP_URL" --tool unraid_rclone \
|
||||
|
||||
---
|
||||
|
||||
## `unraid_keys`
|
||||
## `key`
|
||||
|
||||
### `delete` — Delete an API key (immediately revokes access)
|
||||
|
||||
```bash
|
||||
# 1. Create a test key (names cannot contain hyphens; ID is at key.id)
|
||||
KID=$(mcporter call --http-url "$MCP_URL" --tool unraid_keys \
|
||||
--args '{"action":"create","name":"mcp test key","roles":["VIEWER"]}' --output json \
|
||||
KID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"key","subaction":"create","name":"mcp test key","roles":["VIEWER"]}' --output json \
|
||||
| python3 -c "import json,sys; print(json.load(sys.stdin).get('key',{}).get('id',''))")
|
||||
|
||||
# 2. Delete it
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_keys \
|
||||
--args "{\"action\":\"delete\",\"key_id\":\"$KID\",\"confirm\":true}" --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"key\",\"subaction\":\"delete\",\"key_id\":\"$KID\",\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_keys \
|
||||
--args '{"action":"list"}' --output json | python3 -c \
|
||||
"import json,sys; ks=json.load(sys.stdin).get('keys',[]); print('clean' if not any('mcp-test-key' in k.get('name','') for k in ks) else 'FOUND — cleanup failed')"
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"key","subaction":"list"}' --output json | python3 -c \
|
||||
"import json,sys; ks=json.load(sys.stdin).get('keys',[]); print('clean' if not any('mcp test key' in k.get('name','') for k in ks) else 'FOUND — cleanup failed')"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `unraid_storage`
|
||||
## `disk`
|
||||
|
||||
### `flash_backup` — Rclone backup of flash drive (overwrites destination)
|
||||
|
||||
@@ -217,70 +191,34 @@ mcporter call --http-url "$MCP_URL" --tool unraid_keys \
|
||||
# Prerequisite: create a dedicated test remote pointing away from real backup destination
|
||||
# (use rclone create_remote first, or configure mcp-test-remote manually)
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_storage \
|
||||
--args '{"action":"flash_backup","remote_name":"mcp-test-remote","source_path":"/boot","destination_path":"/flash-backup-test","confirm":true}' --output json
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"disk","subaction":"flash_backup","remote_name":"mcp-test-remote","source_path":"/boot","destination_path":"/flash-backup-test","confirm":true}' --output json
|
||||
```
|
||||
|
||||
> Never point at the same destination as your real flash backup. Create a dedicated `mcp-test-remote` (see `rclone: delete_remote` above for provisioning pattern).
|
||||
|
||||
---
|
||||
|
||||
## `unraid_settings`
|
||||
## `setting`
|
||||
|
||||
### `configure_ups` — Overwrite UPS monitoring configuration
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Wrong config can break UPS integration. If live testing is required: read current config via `unraid_info ups_config`, save values, re-apply identical values (no-op), verify response matches. Test via `tests/safety/` for guard behavior.
|
||||
Wrong config can break UPS integration. If live testing is required: read current config via `unraid(action="system", subaction="ups_config")`, save values, re-apply identical values (no-op), verify response matches. Test via `tests/safety/` for guard behavior.
|
||||
|
||||
---
|
||||
|
||||
### `setup_remote_access` — Modify remote access configuration
|
||||
## `plugin`
|
||||
|
||||
### `remove` — Uninstall a plugin (irreversible without re-install)
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Misconfiguration can break remote connectivity and lock you out. Do not run live. Test via `tests/safety/` confirming `confirm=False` raises `ToolError`.
|
||||
|
||||
---
|
||||
|
||||
### `enable_dynamic_remote_access` — Toggle dynamic remote access
|
||||
Removing a plugin cannot be undone without a full re-install. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless the plugin is intentionally being uninstalled.
|
||||
|
||||
```bash
|
||||
# Strategy: toggle to false (disabling is reversible) on shart only, then restore
|
||||
# Step 1: Read current state
|
||||
CURRENT=$(mcporter call --http-url "$SHART_MCP_URL" --tool unraid_info \
|
||||
--args '{"action":"settings"}' --output json)
|
||||
|
||||
# Step 2: Disable (safe — can be re-enabled)
|
||||
mcporter call --http-url "$SHART_MCP_URL" --tool unraid_settings \
|
||||
--args '{"action":"enable_dynamic_remote_access","access_url_type":"SUBDOMAINS","dynamic_enabled":false,"confirm":true}' --output json
|
||||
|
||||
# Step 3: Restore to previous state
|
||||
mcporter call --http-url "$SHART_MCP_URL" --tool unraid_settings \
|
||||
--args '{"action":"enable_dynamic_remote_access","access_url_type":"SUBDOMAINS","dynamic_enabled":true,"confirm":true}' --output json
|
||||
```
|
||||
|
||||
> Run on `shart` (10.1.0.3) only — never `tootie`.
|
||||
|
||||
---
|
||||
|
||||
## `unraid_info`
|
||||
|
||||
### `update_ssh` — Change SSH enabled state and port
|
||||
|
||||
```bash
|
||||
# Strategy: read current config, re-apply same values (no-op change)
|
||||
|
||||
# 1. Read current SSH settings
|
||||
CURRENT=$(mcporter call --http-url "$MCP_URL" --tool unraid_info \
|
||||
--args '{"action":"settings"}' --output json)
|
||||
SSH_ENABLED=$(echo "$CURRENT" | python3 -c "import json,sys; print(json.load(sys.stdin).get('ssh',{}).get('enabled', True))")
|
||||
SSH_PORT=$(echo "$CURRENT" | python3 -c "import json,sys; print(json.load(sys.stdin).get('ssh',{}).get('port', 22))")
|
||||
|
||||
# 2. Re-apply same values (no-op)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid_info \
|
||||
--args "{\"action\":\"update_ssh\",\"ssh_enabled\":$SSH_ENABLED,\"ssh_port\":$SSH_PORT,\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify SSH connectivity still works
|
||||
ssh root@"$UNRAID_HOST" -p "$SSH_PORT" exit
|
||||
# If live testing is necessary (intentional removal only):
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"plugin","subaction":"remove","names":["<plugin-name>"],"confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
@@ -290,7 +228,9 @@ ssh root@"$UNRAID_HOST" -p "$SSH_PORT" exit
|
||||
The `tests/safety/` directory contains pytest tests that verify:
|
||||
- Every destructive action raises `ToolError` when called with `confirm=False`
|
||||
- Every destructive action raises `ToolError` when called without the `confirm` parameter
|
||||
- The `DESTRUCTIVE_ACTIONS` set in each tool file stays in sync with the actions listed above
|
||||
- The `_*_DESTRUCTIVE` sets in `unraid_mcp/tools/unraid.py` stay in sync with the actions listed above
|
||||
- No GraphQL request reaches the network layer when confirmation is missing (`TestNoGraphQLCallsWhenUnconfirmed`)
|
||||
- Non-destructive actions never require `confirm` (`TestNonDestructiveActionsNeverRequireConfirm`)
|
||||
|
||||
These run as part of the standard test suite:
|
||||
|
||||
@@ -302,20 +242,17 @@ uv run pytest tests/safety/ -v
|
||||
|
||||
## Summary Table
|
||||
|
||||
| Tool | Action | Strategy | Target Server |
|
||||
|------|--------|----------|---------------|
|
||||
| `unraid_docker` | `remove` | Pre-existing stopped container on Unraid server (skipped in test-destructive.sh) | either |
|
||||
| `unraid_docker` | `update_all` | Mock/safety audit only | — |
|
||||
| `unraid_docker` | `delete_entries` | Create folder → destroy | either |
|
||||
| `unraid_docker` | `reset_template_mappings` | Mock/safety audit only | — |
|
||||
| `unraid_vm` | `force_stop` | Minimal Alpine test VM | either |
|
||||
| `unraid_vm` | `reset` | Minimal Alpine test VM | either |
|
||||
| `unraid_notifications` | `delete` | Create notification → destroy | either |
|
||||
| `unraid_notifications` | `delete_archived` | Create → archive → wipe | shart preferred |
|
||||
| `unraid_rclone` | `delete_remote` | Create local:/tmp remote → destroy | either |
|
||||
| `unraid_keys` | `delete` | Create test key → destroy | either |
|
||||
| `unraid_storage` | `flash_backup` | Dedicated test remote, isolated path | either |
|
||||
| `unraid_settings` | `configure_ups` | Mock/safety audit only | — |
|
||||
| `unraid_settings` | `setup_remote_access` | Mock/safety audit only | — |
|
||||
| `unraid_settings` | `enable_dynamic_remote_access` | Toggle false → restore | shart only |
|
||||
| `unraid_info` | `update_ssh` | Read → re-apply same values (no-op) | either |
|
||||
| Domain (`action=`) | Subaction | Strategy | Target Server |
|
||||
|--------------------|-----------|----------|---------------|
|
||||
| `array` | `stop_array` | Mock/safety audit only | — |
|
||||
| `array` | `remove_disk` | Array must be stopped; use intended disk | either |
|
||||
| `array` | `clear_disk_stats` | Discover disk ID → clear | either |
|
||||
| `vm` | `force_stop` | Minimal Alpine test VM | either |
|
||||
| `vm` | `reset` | Minimal Alpine test VM | either |
|
||||
| `notification` | `delete` | Create notification → destroy | either |
|
||||
| `notification` | `delete_archived` | Create → archive → wipe | shart preferred |
|
||||
| `rclone` | `delete_remote` | Create local:/tmp remote → destroy | either |
|
||||
| `key` | `delete` | Create test key → destroy | either |
|
||||
| `disk` | `flash_backup` | Dedicated test remote, isolated path | either |
|
||||
| `setting` | `configure_ups` | Mock/safety audit only | — |
|
||||
| `plugin` | `remove` | Mock/safety audit only | — |
|
||||
|
||||
@@ -14,10 +14,10 @@ The marketplace catalog that lists all available plugins in this repository.
|
||||
- Plugin catalog with the "unraid" skill
|
||||
- Categories and tags for discoverability
|
||||
|
||||
### 2. Plugin Manifest (`skills/unraid/.claude-plugin/plugin.json`)
|
||||
### 2. Plugin Manifest (`.claude-plugin/plugin.json`)
|
||||
The individual plugin configuration for the Unraid skill.
|
||||
|
||||
**Location:** `skills/unraid/.claude-plugin/plugin.json`
|
||||
**Location:** `.claude-plugin/plugin.json`
|
||||
|
||||
**Contents:**
|
||||
- Plugin name, version, author
|
||||
@@ -73,12 +73,11 @@ Users can also install from a specific commit or branch:
|
||||
|
||||
```text
|
||||
unraid-mcp/
|
||||
├── .claude-plugin/ # Marketplace manifest
|
||||
│ ├── marketplace.json
|
||||
│ └── README.md
|
||||
├── skills/unraid/ # Plugin directory
|
||||
│ ├── .claude-plugin/ # Plugin manifest
|
||||
│ │ └── plugin.json
|
||||
├── .claude-plugin/ # Plugin manifest + marketplace manifest
|
||||
│ ├── plugin.json # Plugin configuration (name, version, mcpServers)
|
||||
│ ├── marketplace.json # Marketplace catalog
|
||||
│ └── README.md # Marketplace installation guide
|
||||
├── skills/unraid/ # Skill documentation and helpers
|
||||
│ ├── SKILL.md # Skill documentation
|
||||
│ ├── README.md # Plugin documentation
|
||||
│ ├── examples/ # Example scripts
|
||||
@@ -112,7 +111,7 @@ Before publishing to GitHub:
|
||||
|
||||
2. **Update Version Numbers**
|
||||
- Bump version in `.claude-plugin/marketplace.json`
|
||||
- Bump version in `skills/unraid/.claude-plugin/plugin.json`
|
||||
- Bump version in `.claude-plugin/plugin.json`
|
||||
- Update version in `README.md` if needed
|
||||
|
||||
3. **Test Locally**
|
||||
@@ -123,15 +122,15 @@ Before publishing to GitHub:
|
||||
|
||||
4. **Commit and Push**
|
||||
```bash
|
||||
git add .claude-plugin/ skills/unraid/.claude-plugin/
|
||||
git add .claude-plugin/
|
||||
git commit -m "feat: add Claude Code marketplace configuration"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
5. **Create Release Tag** (Optional)
|
||||
```bash
|
||||
git tag -a v0.2.0 -m "Release v0.2.0"
|
||||
git push origin v0.2.0
|
||||
git tag -a v1.0.0 -m "Release v1.0.0"
|
||||
git push origin v1.0.0
|
||||
```
|
||||
|
||||
## User Experience
|
||||
@@ -159,7 +158,7 @@ After installation, users will:
|
||||
To release a new version:
|
||||
|
||||
1. Make changes to the plugin
|
||||
2. Update version in `skills/unraid/.claude-plugin/plugin.json`
|
||||
2. Update version in `.claude-plugin/plugin.json`
|
||||
3. Update marketplace catalog in `.claude-plugin/marketplace.json`
|
||||
4. Run validation: `./scripts/validate-marketplace.sh`
|
||||
5. Commit and push
|
||||
|
||||
@@ -40,7 +40,7 @@ Before publishing, update the version in `pyproject.toml`:
|
||||
|
||||
```toml
|
||||
[project]
|
||||
version = "0.2.1" # Follow semantic versioning: MAJOR.MINOR.PATCH
|
||||
version = "1.0.0" # Follow semantic versioning: MAJOR.MINOR.PATCH
|
||||
```
|
||||
|
||||
**Semantic Versioning Guide:**
|
||||
@@ -156,7 +156,7 @@ UNRAID_API_URL=https://your-server uvx unraid-mcp-server
|
||||
**Benefits of uvx:**
|
||||
- No installation required
|
||||
- Automatic virtual environment management
|
||||
- Always uses the latest version (or specify version: `uvx unraid-mcp-server@0.2.0`)
|
||||
- Always uses the latest version (or specify version: `uvx unraid-mcp-server@1.0.0`)
|
||||
- Clean execution environment
|
||||
|
||||
## Automation with GitHub Actions (Future)
|
||||
|
||||
23
fastmcp.http.json
Normal file
23
fastmcp.http.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"$schema": "https://gofastmcp.com/public/schemas/fastmcp.json/v1.json",
|
||||
"source": {
|
||||
"path": "unraid_mcp/server.py",
|
||||
"entrypoint": "mcp"
|
||||
},
|
||||
"environment": {
|
||||
"type": "uv",
|
||||
"python": "3.12",
|
||||
"editable": ["."]
|
||||
},
|
||||
"deployment": {
|
||||
"transport": "http",
|
||||
"host": "0.0.0.0",
|
||||
"port": 6970,
|
||||
"path": "/mcp",
|
||||
"log_level": "INFO",
|
||||
"env": {
|
||||
"UNRAID_API_URL": "${UNRAID_API_URL}",
|
||||
"UNRAID_API_KEY": "${UNRAID_API_KEY}"
|
||||
}
|
||||
}
|
||||
}
|
||||
20
fastmcp.stdio.json
Normal file
20
fastmcp.stdio.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"$schema": "https://gofastmcp.com/public/schemas/fastmcp.json/v1.json",
|
||||
"source": {
|
||||
"path": "unraid_mcp/server.py",
|
||||
"entrypoint": "mcp"
|
||||
},
|
||||
"environment": {
|
||||
"type": "uv",
|
||||
"python": "3.12",
|
||||
"editable": ["."]
|
||||
},
|
||||
"deployment": {
|
||||
"transport": "stdio",
|
||||
"log_level": "INFO",
|
||||
"env": {
|
||||
"UNRAID_API_URL": "${UNRAID_API_URL}",
|
||||
"UNRAID_API_KEY": "${UNRAID_API_KEY}"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,7 @@ build-backend = "hatchling.build"
|
||||
# ============================================================================
|
||||
[project]
|
||||
name = "unraid-mcp"
|
||||
version = "0.6.0"
|
||||
version = "1.1.2"
|
||||
description = "MCP Server for Unraid API - provides tools to interact with an Unraid server's GraphQL API"
|
||||
readme = "README.md"
|
||||
license = {file = "LICENSE"}
|
||||
@@ -71,7 +71,7 @@ classifiers = [
|
||||
# ============================================================================
|
||||
dependencies = [
|
||||
"python-dotenv>=1.1.1",
|
||||
"fastmcp>=2.14.5",
|
||||
"fastmcp>=3.0.0",
|
||||
"httpx>=0.28.1",
|
||||
"fastapi>=0.115.0",
|
||||
"uvicorn[standard]>=0.35.0",
|
||||
@@ -258,6 +258,7 @@ omit = [
|
||||
]
|
||||
|
||||
[tool.coverage.report]
|
||||
fail_under = 80
|
||||
precision = 2
|
||||
show_missing = true
|
||||
skip_covered = false
|
||||
|
||||
@@ -70,6 +70,20 @@ else
|
||||
echo -e "Checking: Plugin source path is valid... ${RED}✗${NC} (plugin not found in marketplace)"
|
||||
fi
|
||||
|
||||
# Check version sync between pyproject.toml and plugin.json
|
||||
echo "Checking version sync..."
|
||||
TOML_VER=$(grep '^version = ' pyproject.toml | sed 's/version = "//;s/"//')
|
||||
PLUGIN_VER=$(python3 -c "import json; print(json.load(open('.claude-plugin/plugin.json'))['version'])" 2>/dev/null || echo "ERROR_READING")
|
||||
if [ "$TOML_VER" != "$PLUGIN_VER" ]; then
|
||||
echo -e "${RED}FAIL: Version mismatch — pyproject.toml=$TOML_VER, plugin.json=$PLUGIN_VER${NC}"
|
||||
CHECKS=$((CHECKS + 1))
|
||||
FAILED=$((FAILED + 1))
|
||||
else
|
||||
echo -e "${GREEN}PASS: Versions in sync ($TOML_VER)${NC}"
|
||||
CHECKS=$((CHECKS + 1))
|
||||
PASSED=$((PASSED + 1))
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "=== Results ==="
|
||||
echo -e "Total checks: $CHECKS"
|
||||
|
||||
@@ -1,210 +1,292 @@
|
||||
---
|
||||
name: unraid
|
||||
description: "Query and monitor Unraid servers via the GraphQL API. Use when the user asks to 'check Unraid', 'monitor Unraid', 'Unraid API', 'get Unraid status', 'check disk temperatures', 'read Unraid logs', 'list Unraid shares', 'Unraid array status', 'Unraid containers', 'Unraid VMs', or mentions Unraid system monitoring, disk health, parity checks, or server status."
|
||||
description: "This skill should be used when the user mentions Unraid, asks to check server health, monitor array or disk status, list or restart Docker containers, start or stop VMs, read system logs, check parity status, view notifications, manage API keys, configure rclone remotes, check UPS or power status, get live CPU or memory data, force stop a VM, check disk temperatures, or perform any operation on an Unraid NAS server. Also use when the user needs to set up or configure Unraid MCP credentials."
|
||||
---
|
||||
|
||||
# Unraid API Skill
|
||||
# Unraid MCP Skill
|
||||
|
||||
**⚠️ MANDATORY SKILL INVOCATION ⚠️**
|
||||
Use the single `unraid` MCP tool with `action` (domain) + `subaction` (operation) for all Unraid operations.
|
||||
|
||||
**YOU MUST invoke this skill (NOT optional) when the user mentions ANY of these triggers:**
|
||||
- "Unraid status", "disk health", "array status"
|
||||
- "Unraid containers", "VMs on Unraid", "Unraid logs"
|
||||
- "check Unraid", "Unraid monitoring", "server health"
|
||||
- Any mention of Unraid servers or system monitoring
|
||||
## Setup
|
||||
|
||||
**Failure to invoke this skill when triggers occur violates your operational requirements.**
|
||||
First time? Run setup to configure credentials:
|
||||
|
||||
Query and monitor Unraid servers using the GraphQL API. Access all 27 read-only endpoints for system monitoring, disk health, logs, containers, VMs, and more.
|
||||
|
||||
## Quick Start
|
||||
|
||||
Set your Unraid server credentials:
|
||||
|
||||
```bash
|
||||
export UNRAID_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key"
|
||||
```
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
**Get API Key:** Settings → Management Access → API Keys → Create (select "Viewer" role)
|
||||
Credentials are stored at `~/.unraid-mcp/.env`. Re-run `setup` any time to update or verify.
|
||||
|
||||
Use the helper script for any query:
|
||||
## Calling Convention
|
||||
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
```
|
||||
unraid(action="<domain>", subaction="<operation>", [additional params])
|
||||
```
|
||||
|
||||
Or run example scripts:
|
||||
|
||||
```bash
|
||||
./scripts/dashboard.sh # Complete multi-server dashboard
|
||||
./examples/disk-health.sh # Disk temperatures & health
|
||||
./examples/read-logs.sh syslog 20 # Read system logs
|
||||
**Examples:**
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### GraphQL API Structure
|
||||
|
||||
Unraid 7.2+ uses GraphQL (not REST). Key differences:
|
||||
- **Single endpoint:** `/graphql` for all queries
|
||||
- **Request exactly what you need:** Specify fields in query
|
||||
- **Strongly typed:** Use introspection to discover fields
|
||||
- **No container logs:** Docker container output logs not accessible
|
||||
|
||||
### Two Resources for Stats
|
||||
|
||||
- **`info`** - Static hardware specs (CPU model, cores, OS version)
|
||||
- **`metrics`** - Real-time usage (CPU %, memory %, current load)
|
||||
|
||||
Always use `metrics` for monitoring, `info` for specifications.
|
||||
|
||||
## Common Tasks
|
||||
|
||||
### System Monitoring
|
||||
|
||||
**Check if server is online:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
```
|
||||
|
||||
**Get CPU and memory usage:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ metrics { cpu { percentTotal } memory { used total percentTotal } } }"
|
||||
```
|
||||
|
||||
**Complete dashboard:**
|
||||
```bash
|
||||
./scripts/dashboard.sh
|
||||
```
|
||||
|
||||
### Disk Management
|
||||
|
||||
**Check disk health and temperatures:**
|
||||
```bash
|
||||
./examples/disk-health.sh
|
||||
```
|
||||
|
||||
**Get array status:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ array { state parityCheckStatus { status progress errors } } }"
|
||||
```
|
||||
|
||||
**List all physical disks (including cache/USB):**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ disks { name } }"
|
||||
```
|
||||
|
||||
### Storage Shares
|
||||
|
||||
**List network shares:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ shares { name comment } }"
|
||||
```
|
||||
|
||||
### Logs
|
||||
|
||||
**List available logs:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ logFiles { name size modifiedAt } }"
|
||||
```
|
||||
|
||||
**Read log content:**
|
||||
```bash
|
||||
./examples/read-logs.sh syslog 20
|
||||
```
|
||||
|
||||
### Containers & VMs
|
||||
|
||||
**List Docker containers:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ docker { containers { names image state status } } }"
|
||||
```
|
||||
|
||||
**List VMs:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ vms { domain { name state } } }"
|
||||
```
|
||||
|
||||
**Note:** Container output logs are NOT accessible via API. Use `docker logs` via SSH.
|
||||
|
||||
### Notifications
|
||||
|
||||
**Get notification counts:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ notifications { overview { unread { info warning alert total } } } }"
|
||||
```
|
||||
|
||||
## Helper Script Usage
|
||||
|
||||
The `scripts/unraid-query.sh` helper supports:
|
||||
|
||||
```bash
|
||||
# Basic usage
|
||||
./scripts/unraid-query.sh -u URL -k API_KEY -q "QUERY"
|
||||
|
||||
# Use environment variables
|
||||
export UNRAID_URL="https://unraid.local/graphql"
|
||||
export UNRAID_API_KEY="your-key"
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
|
||||
# Format options
|
||||
-f json # Raw JSON (default)
|
||||
-f pretty # Pretty-printed JSON
|
||||
-f raw # Just the data (no wrapper)
|
||||
```
|
||||
|
||||
## Additional Resources
|
||||
|
||||
### Reference Files
|
||||
|
||||
For detailed documentation, consult:
|
||||
- **`references/endpoints.md`** - Complete list of all 27 API endpoints
|
||||
- **`references/troubleshooting.md`** - Common errors and solutions
|
||||
- **`references/api-reference.md`** - Detailed field documentation
|
||||
|
||||
### Helper Scripts
|
||||
|
||||
- **`scripts/unraid-query.sh`** - Main GraphQL query tool
|
||||
- **`scripts/dashboard.sh`** - Automated multi-server inventory reporter
|
||||
|
||||
## Quick Command Reference
|
||||
|
||||
```bash
|
||||
# System status
|
||||
./scripts/unraid-query.sh -q "{ online metrics { cpu { percentTotal } } }"
|
||||
|
||||
# Disk health
|
||||
./examples/disk-health.sh
|
||||
|
||||
# Array status
|
||||
./scripts/unraid-query.sh -q "{ array { state } }"
|
||||
|
||||
# Read logs
|
||||
./examples/read-logs.sh syslog 20
|
||||
|
||||
# Complete dashboard
|
||||
./scripts/dashboard.sh
|
||||
|
||||
# List shares
|
||||
./scripts/unraid-query.sh -q "{ shares { name } }"
|
||||
|
||||
# List containers
|
||||
./scripts/unraid-query.sh -q "{ docker { containers { names state } } }"
|
||||
unraid(action="system", subaction="overview")
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="health", subaction="check")
|
||||
unraid(action="array", subaction="parity_status")
|
||||
unraid(action="disk", subaction="disks")
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="notification", subaction="overview")
|
||||
unraid(action="live", subaction="cpu")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Agent Tool Usage Requirements
|
||||
## All Domains and Subactions
|
||||
|
||||
**CRITICAL:** When invoking scripts from this skill via the zsh-tool, **ALWAYS use `pty: true`**.
|
||||
### `system` — Server Information
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `overview` | Complete system summary (recommended starting point) |
|
||||
| `server` | Hostname, version, uptime |
|
||||
| `servers` | All known Unraid servers |
|
||||
| `array` | Array status and disk list |
|
||||
| `network` | Network interfaces and config |
|
||||
| `registration` | License and registration status |
|
||||
| `variables` | Environment variables |
|
||||
| `metrics` | Real-time CPU, memory, I/O usage |
|
||||
| `services` | Running services status |
|
||||
| `display` | Display settings |
|
||||
| `config` | System configuration |
|
||||
| `online` | Quick online status check |
|
||||
| `owner` | Server owner information |
|
||||
| `settings` | User settings and preferences |
|
||||
| `flash` | USB flash drive details |
|
||||
| `ups_devices` | List all UPS devices |
|
||||
| `ups_device` | Single UPS device (requires `device_id`) |
|
||||
| `ups_config` | UPS configuration |
|
||||
|
||||
Without PTY mode, command output will not be visible even though commands execute successfully.
|
||||
### `health` — Diagnostics
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `check` | Comprehensive health check — connectivity, array, disks, containers, VMs, resources |
|
||||
| `test_connection` | Test API connectivity and authentication |
|
||||
| `diagnose` | Detailed diagnostic report with troubleshooting recommendations |
|
||||
| `setup` | Configure credentials interactively (stores to `~/.unraid-mcp/.env`) |
|
||||
|
||||
**Correct invocation pattern:**
|
||||
```typescript
|
||||
<invoke name="mcp__plugin_zsh-tool_zsh-tool__zsh">
|
||||
<parameter name="command">./skills/SKILL_NAME/scripts/SCRIPT.sh [args]</parameter>
|
||||
<parameter name="pty">true</parameter>
|
||||
</invoke>
|
||||
### `array` — Array & Parity
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `parity_status` | Current parity check progress and status |
|
||||
| `parity_history` | Historical parity check results |
|
||||
| `parity_start` | Start a parity check |
|
||||
| `parity_pause` | Pause a running parity check |
|
||||
| `parity_resume` | Resume a paused parity check |
|
||||
| `parity_cancel` | Cancel a running parity check |
|
||||
| `start_array` | Start the array |
|
||||
| `stop_array` | ⚠️ Stop the array (requires `confirm=True`) |
|
||||
| `add_disk` | Add a disk to the array (requires `slot`, `id`) |
|
||||
| `remove_disk` | ⚠️ Remove a disk (requires `slot`, `confirm=True`) |
|
||||
| `mount_disk` | Mount a disk |
|
||||
| `unmount_disk` | Unmount a disk |
|
||||
| `clear_disk_stats` | ⚠️ Clear disk statistics (requires `confirm=True`) |
|
||||
|
||||
### `disk` — Storage & Logs
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `shares` | List network shares |
|
||||
| `disks` | All physical disks with health and temperatures |
|
||||
| `disk_details` | Detailed info for a specific disk (requires `disk_id`) |
|
||||
| `log_files` | List available log files |
|
||||
| `logs` | Read log content (requires `path`; optional `lines`) |
|
||||
| `flash_backup` | ⚠️ Trigger a flash backup (requires `confirm=True`) |
|
||||
|
||||
### `docker` — Containers
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All containers with status, image, state |
|
||||
| `details` | Single container details (requires container identifier) |
|
||||
| `start` | Start a container (requires container identifier) |
|
||||
| `stop` | Stop a container (requires container identifier) |
|
||||
| `restart` | Restart a container (requires container identifier) |
|
||||
| `networks` | List Docker networks |
|
||||
| `network_details` | Details for a specific network (requires `network_id`) |
|
||||
|
||||
**Container Identification:** Name, ID, or partial name (fuzzy match supported).
|
||||
|
||||
### `vm` — Virtual Machines
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All VMs with state |
|
||||
| `details` | Single VM details (requires `vm_id`) |
|
||||
| `start` | Start a VM (requires `vm_id`) |
|
||||
| `stop` | Gracefully stop a VM (requires `vm_id`) |
|
||||
| `pause` | Pause a VM (requires `vm_id`) |
|
||||
| `resume` | Resume a paused VM (requires `vm_id`) |
|
||||
| `reboot` | Reboot a VM (requires `vm_id`) |
|
||||
| `force_stop` | ⚠️ Force stop a VM (requires `vm_id`, `confirm=True`) |
|
||||
| `reset` | ⚠️ Hard reset a VM (requires `vm_id`, `confirm=True`) |
|
||||
|
||||
### `notification` — Notifications
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `overview` | Notification counts (unread, archived by type) |
|
||||
| `list` | List notifications (optional `filter`, `limit`, `offset`) |
|
||||
| `mark_unread` | Mark a notification as unread (requires `notification_id`) |
|
||||
| `create` | Create a notification (requires `title`, `subject`, `description`, `importance`) |
|
||||
| `archive` | Archive a notification (requires `notification_id`) |
|
||||
| `delete` | ⚠️ Delete a notification (requires `notification_id`, `notification_type`, `confirm=True`) |
|
||||
| `delete_archived` | ⚠️ Delete all archived (requires `confirm=True`) |
|
||||
| `archive_all` | Archive all unread notifications |
|
||||
| `archive_many` | Archive multiple (requires `ids` list) |
|
||||
| `unarchive_many` | Unarchive multiple (requires `ids` list) |
|
||||
| `unarchive_all` | Unarchive all archived notifications |
|
||||
| `recalculate` | Recalculate notification counts |
|
||||
|
||||
### `key` — API Keys
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All API keys |
|
||||
| `get` | Single key details (requires `key_id`) |
|
||||
| `create` | Create a new key (requires `name`, `roles`) |
|
||||
| `update` | Update a key (requires `key_id`) |
|
||||
| `delete` | ⚠️ Delete a key (requires `key_id`, `confirm=True`) |
|
||||
| `add_role` | Add a role to a key (requires `key_id`, `role`) |
|
||||
| `remove_role` | Remove a role from a key (requires `key_id`, `role`) |
|
||||
|
||||
### `plugin` — Plugins
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All installed plugins |
|
||||
| `add` | Install plugins (requires `names` — list of plugin names) |
|
||||
| `remove` | ⚠️ Uninstall plugins (requires `names` — list of plugin names, `confirm=True`) |
|
||||
|
||||
### `rclone` — Cloud Storage
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list_remotes` | List configured rclone remotes |
|
||||
| `config_form` | Get configuration form for a remote type |
|
||||
| `create_remote` | Create a new remote (requires `name`, `type`, `fields`) |
|
||||
| `delete_remote` | ⚠️ Delete a remote (requires `name`, `confirm=True`) |
|
||||
|
||||
### `setting` — System Settings
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `update` | Update system settings (requires `settings` object) |
|
||||
| `configure_ups` | ⚠️ Configure UPS settings (requires `confirm=True`) |
|
||||
|
||||
### `customization` — Theme & Appearance
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `theme` | Current theme settings |
|
||||
| `public_theme` | Public-facing theme |
|
||||
| `is_initial_setup` | Check if initial setup is complete |
|
||||
| `sso_enabled` | Check SSO status |
|
||||
| `set_theme` | Update theme (requires theme parameters) |
|
||||
|
||||
### `oidc` — SSO / OpenID Connect
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `providers` | List configured OIDC providers |
|
||||
| `provider` | Single provider details (requires `provider_id`) |
|
||||
| `configuration` | OIDC configuration |
|
||||
| `public_providers` | Public-facing provider list |
|
||||
| `validate_session` | Validate current SSO session |
|
||||
|
||||
### `user` — Current User
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `me` | Current authenticated user info |
|
||||
|
||||
### `live` — Real-Time Subscriptions
|
||||
These use persistent WebSocket connections. Returns a "connecting" placeholder on the first call — retry momentarily for live data.
|
||||
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `cpu` | Live CPU utilization |
|
||||
| `memory` | Live memory usage |
|
||||
| `cpu_telemetry` | Detailed CPU telemetry |
|
||||
| `array_state` | Live array state changes |
|
||||
| `parity_progress` | Live parity check progress |
|
||||
| `ups_status` | Live UPS status |
|
||||
| `notifications_overview` | Live notification counts |
|
||||
| `owner` | Live owner info |
|
||||
| `server_status` | Live server status |
|
||||
| `log_tail` | Live log tail stream |
|
||||
| `notification_feed` | Live notification feed |
|
||||
|
||||
---
|
||||
|
||||
## Destructive Actions
|
||||
|
||||
All require `confirm=True` as an explicit parameter. Without it, the action is blocked and elicitation is triggered.
|
||||
|
||||
| Domain | Subaction | Risk |
|
||||
|--------|-----------|------|
|
||||
| `array` | `stop_array` | Stops array while containers/VMs may use shares |
|
||||
| `array` | `remove_disk` | Removes disk from array |
|
||||
| `array` | `clear_disk_stats` | Clears disk statistics permanently |
|
||||
| `vm` | `force_stop` | Hard kills VM without graceful shutdown |
|
||||
| `vm` | `reset` | Hard resets VM |
|
||||
| `notification` | `delete` | Permanently deletes a notification |
|
||||
| `notification` | `delete_archived` | Permanently deletes all archived notifications |
|
||||
| `rclone` | `delete_remote` | Removes a cloud storage remote |
|
||||
| `key` | `delete` | Permanently deletes an API key |
|
||||
| `disk` | `flash_backup` | Triggers flash backup operation |
|
||||
| `setting` | `configure_ups` | Modifies UPS configuration |
|
||||
| `plugin` | `remove` | Uninstalls a plugin |
|
||||
|
||||
---
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### First-time setup
|
||||
```
|
||||
unraid(action="health", subaction="setup")
|
||||
unraid(action="health", subaction="check")
|
||||
```
|
||||
|
||||
### System health overview
|
||||
```
|
||||
unraid(action="system", subaction="overview")
|
||||
unraid(action="health", subaction="check")
|
||||
```
|
||||
|
||||
### Container management
|
||||
```
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="docker", subaction="details", container_id="plex")
|
||||
unraid(action="docker", subaction="restart", container_id="sonarr")
|
||||
```
|
||||
|
||||
### Array and disk status
|
||||
```
|
||||
unraid(action="array", subaction="parity_status")
|
||||
unraid(action="disk", subaction="disks")
|
||||
unraid(action="system", subaction="array")
|
||||
```
|
||||
|
||||
### Read logs
|
||||
```
|
||||
unraid(action="disk", subaction="log_files")
|
||||
unraid(action="disk", subaction="logs", path="syslog", lines=50)
|
||||
```
|
||||
|
||||
### Live monitoring
|
||||
```
|
||||
unraid(action="live", subaction="cpu")
|
||||
unraid(action="live", subaction="memory")
|
||||
unraid(action="live", subaction="array_state")
|
||||
```
|
||||
|
||||
### VM operations
|
||||
```
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="vm", subaction="start", vm_id="<id>")
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- **Rate limit:** 100 requests / 10 seconds
|
||||
- **Log path validation:** Only `/var/log/`, `/boot/logs/`, `/mnt/` prefixes accepted
|
||||
- **Container logs:** Docker container stdout/stderr are NOT accessible via API — use SSH + `docker logs`
|
||||
- **`arraySubscription`:** Known Unraid API bug — `live/array_state` may show "connecting" indefinitely
|
||||
- **Event-driven subs** (`notifications_overview`, `owner`, `server_status`, `ups_status`): Only populate cache on first real server event
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# Unraid API - Complete Reference Guide
|
||||
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents the raw GraphQL API schema for development and maintenance purposes (adding new queries/mutations). Do NOT use these curl/GraphQL examples for MCP tool usage. Use `unraid(action=..., subaction=...)` calls instead. See [`SKILL.md`](../SKILL.md) for the correct calling convention.
|
||||
|
||||
**Tested on:** Unraid 7.2 x86_64
|
||||
**Date:** 2026-01-21
|
||||
**API Type:** GraphQL
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents raw GraphQL endpoints for development purposes. For MCP tool usage, use `unraid(action=..., subaction=...)` calls as documented in `SKILL.md`.
|
||||
|
||||
# Unraid API Endpoints Reference
|
||||
|
||||
Complete list of available GraphQL read-only endpoints in Unraid 7.2+.
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — Full GraphQL SDL from live API introspection. Use this to verify field names and types when adding new queries/mutations to the MCP server. Not for runtime agent usage.
|
||||
|
||||
"""
|
||||
Indicates exactly one field must be supplied and this field must not be `null`.
|
||||
"""
|
||||
|
||||
@@ -1,219 +1,125 @@
|
||||
# Unraid API Quick Reference
|
||||
# Unraid MCP — Quick Reference
|
||||
|
||||
Quick reference for the most common Unraid GraphQL API queries.
|
||||
All operations use: `unraid(action="<domain>", subaction="<operation>", [params])`
|
||||
|
||||
## Setup
|
||||
## Most Common Operations
|
||||
|
||||
```bash
|
||||
# Set environment variables
|
||||
export UNRAID_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key-here"
|
||||
### Health & Status
|
||||
|
||||
# Or use the helper script directly
|
||||
./scripts/unraid-query.sh -u "$UNRAID_URL" -k "$UNRAID_API_KEY" -q "{ online }"
|
||||
```python
|
||||
unraid(action="health", subaction="setup") # First-time credential setup
|
||||
unraid(action="health", subaction="check") # Full health check
|
||||
unraid(action="health", subaction="test_connection") # Quick connectivity test
|
||||
unraid(action="system", subaction="overview") # Complete server summary
|
||||
unraid(action="system", subaction="metrics") # CPU / RAM / I/O usage
|
||||
unraid(action="system", subaction="online") # Online status
|
||||
```
|
||||
|
||||
## Common Queries
|
||||
### Array & Disks
|
||||
|
||||
### System Status
|
||||
```graphql
|
||||
{
|
||||
online
|
||||
metrics {
|
||||
cpu { percentTotal }
|
||||
memory { total used free percentTotal }
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="system", subaction="array") # Array status overview
|
||||
unraid(action="disk", subaction="disks") # All disks with temps & health
|
||||
unraid(action="array", subaction="parity_status") # Current parity check
|
||||
unraid(action="array", subaction="parity_history") # Past parity results
|
||||
unraid(action="array", subaction="parity_start") # Start parity check
|
||||
unraid(action="array", subaction="stop_array", confirm=True) # ⚠️ Stop array
|
||||
```
|
||||
|
||||
### Array Status
|
||||
```graphql
|
||||
{
|
||||
array {
|
||||
state
|
||||
parityCheckStatus { status progress errors }
|
||||
}
|
||||
}
|
||||
```
|
||||
### Logs
|
||||
|
||||
### Disk List with Temperatures
|
||||
```graphql
|
||||
{
|
||||
array {
|
||||
disks {
|
||||
name
|
||||
device
|
||||
temp
|
||||
status
|
||||
fsSize
|
||||
fsFree
|
||||
isSpinning
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### All Physical Disks (including USB/SSDs)
|
||||
```graphql
|
||||
{
|
||||
disks {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Network Shares
|
||||
```graphql
|
||||
{
|
||||
shares {
|
||||
name
|
||||
comment
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="disk", subaction="log_files") # List available logs
|
||||
unraid(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=50) # Read syslog
|
||||
unraid(action="live", subaction="log_tail", path="/var/log/syslog") # Live tail
|
||||
```
|
||||
|
||||
### Docker Containers
|
||||
```graphql
|
||||
{
|
||||
docker {
|
||||
containers {
|
||||
id
|
||||
names
|
||||
image
|
||||
state
|
||||
status
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```python
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="docker", subaction="details", container_id="plex")
|
||||
unraid(action="docker", subaction="start", container_id="nginx")
|
||||
unraid(action="docker", subaction="stop", container_id="nginx")
|
||||
unraid(action="docker", subaction="restart", container_id="sonarr")
|
||||
unraid(action="docker", subaction="networks")
|
||||
```
|
||||
|
||||
### Virtual Machines
|
||||
```graphql
|
||||
{
|
||||
vms {
|
||||
id
|
||||
name
|
||||
state
|
||||
cpus
|
||||
memory
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### List Log Files
|
||||
```graphql
|
||||
{
|
||||
logFiles {
|
||||
name
|
||||
size
|
||||
modifiedAt
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Read Log Content
|
||||
```graphql
|
||||
{
|
||||
logFile(path: "syslog", lines: 20) {
|
||||
content
|
||||
totalLines
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### System Info
|
||||
```graphql
|
||||
{
|
||||
info {
|
||||
time
|
||||
cpu { model cores threads }
|
||||
os { distro release }
|
||||
system { manufacturer model }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### UPS Devices
|
||||
```graphql
|
||||
{
|
||||
upsDevices {
|
||||
id
|
||||
name
|
||||
status
|
||||
charge
|
||||
load
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="vm", subaction="details", vm_id="<id>")
|
||||
unraid(action="vm", subaction="start", vm_id="<id>")
|
||||
unraid(action="vm", subaction="stop", vm_id="<id>")
|
||||
unraid(action="vm", subaction="reboot", vm_id="<id>")
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
### Notifications
|
||||
|
||||
**Counts:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
overview {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="notification", subaction="overview")
|
||||
unraid(action="notification", subaction="list", list_type="UNREAD", limit=10)
|
||||
unraid(action="notification", subaction="archive", notification_id="<id>")
|
||||
unraid(action="notification", subaction="create", title="Test", subject="Subject",
|
||||
description="Body", importance="INFO")
|
||||
```
|
||||
|
||||
**List Unread:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
list(filter: { type: UNREAD, offset: 0, limit: 10 }) {
|
||||
id
|
||||
subject
|
||||
description
|
||||
timestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
### API Keys
|
||||
|
||||
```python
|
||||
unraid(action="key", subaction="list")
|
||||
unraid(action="key", subaction="create", name="my-key", roles=["viewer"])
|
||||
unraid(action="key", subaction="delete", key_id="<id>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
**List Archived:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
list(filter: { type: ARCHIVE, offset: 0, limit: 10 }) {
|
||||
id
|
||||
subject
|
||||
description
|
||||
timestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
### Plugins
|
||||
|
||||
```python
|
||||
unraid(action="plugin", subaction="list")
|
||||
unraid(action="plugin", subaction="add", names=["community.applications"])
|
||||
unraid(action="plugin", subaction="remove", names=["old.plugin"], confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
## Field Name Notes
|
||||
### rclone
|
||||
|
||||
- Use `metrics` for real-time usage (CPU/memory percentages)
|
||||
- Use `info` for hardware specs (cores, model, etc.)
|
||||
- Temperature field is `temp` (not `temperature`)
|
||||
- Status field is `state` for array (not `status`)
|
||||
- Sizes are in kilobytes
|
||||
- Temperatures are in Celsius
|
||||
|
||||
## Response Structure
|
||||
|
||||
All responses follow this pattern:
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"queryName": { ... }
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="rclone", subaction="list_remotes")
|
||||
unraid(action="rclone", subaction="delete_remote", name="<remote>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
Errors appear in:
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
{ "message": "..." }
|
||||
]
|
||||
}
|
||||
### Live Subscriptions (real-time)
|
||||
|
||||
```python
|
||||
unraid(action="live", subaction="cpu")
|
||||
unraid(action="live", subaction="memory")
|
||||
unraid(action="live", subaction="parity_progress")
|
||||
unraid(action="live", subaction="log_tail")
|
||||
unraid(action="live", subaction="notification_feed")
|
||||
unraid(action="live", subaction="ups_status")
|
||||
```
|
||||
|
||||
> Returns `{"status": "connecting"}` on first call — retry momentarily.
|
||||
|
||||
---
|
||||
|
||||
## Domain → action= Mapping
|
||||
|
||||
| Old tool name (pre-v1.0) | New `action=` |
|
||||
|--------------------------|---------------|
|
||||
| `unraid_info` | `system` |
|
||||
| `unraid_health` | `health` |
|
||||
| `unraid_array` | `array` |
|
||||
| `unraid_storage` | `disk` |
|
||||
| `unraid_docker` | `docker` |
|
||||
| `unraid_vm` | `vm` |
|
||||
| `unraid_notifications` | `notification` |
|
||||
| `unraid_keys` | `key` |
|
||||
| `unraid_plugins` | `plugin` |
|
||||
| `unraid_rclone` | `rclone` |
|
||||
| `unraid_settings` | `setting` |
|
||||
| `unraid_customization` | `customization` |
|
||||
| `unraid_oidc` | `oidc` |
|
||||
| `unraid_users` | `user` |
|
||||
| `unraid_live` | `live` |
|
||||
|
||||
@@ -1,36 +1,109 @@
|
||||
# Unraid API Troubleshooting Guide
|
||||
# Unraid MCP — Troubleshooting Guide
|
||||
|
||||
Common issues and solutions when working with the Unraid GraphQL API.
|
||||
## Credentials Not Configured
|
||||
|
||||
## "Cannot query field" error
|
||||
**Error:** `CredentialsNotConfiguredError` or message containing `~/.unraid-mcp/.env`
|
||||
|
||||
Field name doesn't exist in your Unraid version. Use introspection to find valid fields:
|
||||
**Fix:** Run setup to configure credentials interactively:
|
||||
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ __type(name: \"TypeName\") { fields { name } } }"
|
||||
```python
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
## "API key validation failed"
|
||||
- Check API key is correct and not truncated
|
||||
- Verify key has appropriate permissions (use "Viewer" role)
|
||||
- Ensure URL includes `/graphql` endpoint (e.g. `http://host/graphql`)
|
||||
This writes `UNRAID_API_URL` and `UNRAID_API_KEY` to `~/.unraid-mcp/.env`. Re-run at any time to update or rotate credentials.
|
||||
|
||||
## Empty results
|
||||
Many queries return empty arrays when no data exists:
|
||||
- `docker.containers` - No containers running
|
||||
- `vms` - No VMs configured (or VM service disabled)
|
||||
- `notifications` - No active alerts
|
||||
- `plugins` - No plugins installed
|
||||
---
|
||||
|
||||
This is normal behavior, not an error. Ensure your scripts handle empty arrays gracefully.
|
||||
## Connection Failed / API Unreachable
|
||||
|
||||
## "VMs are not available" (GraphQL Error)
|
||||
If the VM manager is disabled in Unraid settings, querying `{ vms { ... } }` will return a GraphQL error.
|
||||
**Solution:** Check if VM service is enabled before querying, or use error handling (like `IGNORE_ERRORS=true` in dashboard scripts) to process partial data.
|
||||
**Symptoms:** Timeout, connection refused, network error
|
||||
|
||||
## URL connection issues
|
||||
- Use HTTPS (not HTTP) for remote access if configured
|
||||
- For local access: `http://unraid-server-ip/graphql`
|
||||
- For Unraid Connect: Use provided URL with token in hostname
|
||||
- Use `-k` (insecure) with curl if using self-signed certs on local HTTPS
|
||||
- Use `-L` (follow redirects) if Unraid redirects HTTP to HTTPS
|
||||
**Diagnostic steps:**
|
||||
|
||||
1. Test basic connectivity:
|
||||
|
||||
```python
|
||||
unraid(action="health", subaction="test_connection")
|
||||
```
|
||||
|
||||
1. Full diagnostic report:
|
||||
|
||||
```python
|
||||
unraid(action="health", subaction="diagnose")
|
||||
```
|
||||
|
||||
1. Check that `UNRAID_API_URL` in `~/.unraid-mcp/.env` points to the correct Unraid GraphQL endpoint.
|
||||
|
||||
1. Verify the API key has the required roles. Get a new key: **Unraid UI → Settings → Management Access → API Keys → Create** (select "Viewer" role for read-only, or appropriate roles for mutations).
|
||||
|
||||
---
|
||||
|
||||
## Invalid Action / Subaction
|
||||
|
||||
**Error:** `Invalid action 'X'` or `Invalid subaction 'X' for action 'Y'`
|
||||
|
||||
**Fix:** Check the domain table in `SKILL.md` for the exact `action=` and `subaction=` strings. Common mistakes:
|
||||
|
||||
| Wrong | Correct |
|
||||
|-------|---------|
|
||||
| `action="info"` | `action="system"` |
|
||||
| `action="notifications"` | `action="notification"` |
|
||||
| `action="keys"` | `action="key"` |
|
||||
| `action="plugins"` | `action="plugin"` |
|
||||
| `action="settings"` | `action="setting"` |
|
||||
| `subaction="unread"` | `subaction="mark_unread"` |
|
||||
|
||||
---
|
||||
|
||||
## Destructive Action Blocked
|
||||
|
||||
**Error:** `Action 'X' was not confirmed. Re-run with confirm=True to bypass elicitation.`
|
||||
|
||||
**Fix:** Add `confirm=True` to the call:
|
||||
|
||||
```python
|
||||
unraid(action="array", subaction="stop_array", confirm=True)
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True)
|
||||
```
|
||||
|
||||
See the Destructive Actions table in `SKILL.md` for the full list.
|
||||
|
||||
---
|
||||
|
||||
## Live Subscription Returns "Connecting"
|
||||
|
||||
**Symptoms:** `unraid(action="live", ...)` returns `{"status": "connecting"}`
|
||||
|
||||
**Explanation:** The persistent WebSocket subscription has not yet received its first event. Retry in a moment.
|
||||
|
||||
**Known issue:** `live/array_state` uses `arraySubscription` which has a known Unraid API bug (returns null for a non-nullable field). This subscription will always show "connecting."
|
||||
|
||||
**Event-driven subscriptions** (`live/notifications_overview`, `live/owner`, `live/server_status`, `live/ups_status`) only populate when the server emits a change event. If the server is idle, these may never populate during a session.
|
||||
|
||||
**Workaround for array state:** Use `unraid(action="system", subaction="array")` for a synchronous snapshot instead.
|
||||
|
||||
---
|
||||
|
||||
## Rate Limit Exceeded
|
||||
|
||||
**Limit:** 100 requests / 10 seconds
|
||||
|
||||
**Symptoms:** HTTP 429 or rate limit error
|
||||
|
||||
**Fix:** Space out requests. Avoid polling in tight loops. Use `live/` subscriptions for real-time data instead of polling `system/metrics` repeatedly.
|
||||
|
||||
---
|
||||
|
||||
## Log Path Rejected
|
||||
|
||||
**Error:** `Invalid log path`
|
||||
|
||||
**Valid log path prefixes:** `/var/log/`, `/boot/logs/`, `/mnt/`
|
||||
|
||||
Use `unraid(action="disk", subaction="log_files")` to list available logs before reading.
|
||||
|
||||
---
|
||||
|
||||
## Container Logs Not Available
|
||||
|
||||
Docker container stdout/stderr are **not accessible via the Unraid API**. SSH to the Unraid server and use `docker logs <container>` directly.
|
||||
|
||||
@@ -70,7 +70,7 @@ class DockerMutationResult(BaseModel):
|
||||
"""Shape returned by docker start/stop/pause/unpause mutations."""
|
||||
|
||||
success: bool
|
||||
action: str
|
||||
subaction: str
|
||||
container: Any = None
|
||||
|
||||
|
||||
@@ -287,48 +287,42 @@ class NotificationCreateResult(BaseModel):
|
||||
|
||||
@pytest.fixture
|
||||
def _docker_mock() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _info_mock() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.info.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _storage_mock() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _notifications_mock() -> Generator[AsyncMock, None, None]:
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _docker_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
def _info_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
def _storage_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
def _notifications_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -341,7 +335,7 @@ class TestDockerListContract:
|
||||
|
||||
async def test_list_result_has_containers_key(self, _docker_mock: AsyncMock) -> None:
|
||||
_docker_mock.return_value = {"docker": {"containers": []}}
|
||||
result = await _docker_tool()(action="list")
|
||||
result = await _docker_tool()(action="docker", subaction="list")
|
||||
DockerListResult(**result)
|
||||
|
||||
async def test_list_containers_conform_to_shape(self, _docker_mock: AsyncMock) -> None:
|
||||
@@ -353,14 +347,14 @@ class TestDockerListContract:
|
||||
]
|
||||
}
|
||||
}
|
||||
result = await _docker_tool()(action="list")
|
||||
result = await _docker_tool()(action="docker", subaction="list")
|
||||
validated = DockerListResult(**result)
|
||||
for container in validated.containers:
|
||||
DockerContainer(**container)
|
||||
|
||||
async def test_list_empty_containers_is_valid(self, _docker_mock: AsyncMock) -> None:
|
||||
_docker_mock.return_value = {"docker": {"containers": []}}
|
||||
result = await _docker_tool()(action="list")
|
||||
result = await _docker_tool()(action="docker", subaction="list")
|
||||
validated = DockerListResult(**result)
|
||||
assert validated.containers == []
|
||||
|
||||
@@ -369,7 +363,7 @@ class TestDockerListContract:
|
||||
_docker_mock.return_value = {
|
||||
"docker": {"containers": [{"id": "abc123", "names": ["plex"], "state": "running"}]}
|
||||
}
|
||||
result = await _docker_tool()(action="list")
|
||||
result = await _docker_tool()(action="docker", subaction="list")
|
||||
container_raw = result["containers"][0]
|
||||
DockerContainer(**container_raw)
|
||||
|
||||
@@ -378,7 +372,7 @@ class TestDockerListContract:
|
||||
_docker_mock.return_value = {
|
||||
"docker": {"containers": [{"id": "abc123", "state": "running"}]}
|
||||
}
|
||||
result = await _docker_tool()(action="list")
|
||||
result = await _docker_tool()(action="docker", subaction="list")
|
||||
with pytest.raises(ValidationError):
|
||||
DockerContainer(**result["containers"][0])
|
||||
|
||||
@@ -403,7 +397,7 @@ class TestDockerDetailsContract:
|
||||
]
|
||||
}
|
||||
}
|
||||
result = await _docker_tool()(action="details", container_id=cid)
|
||||
result = await _docker_tool()(action="docker", subaction="details", container_id=cid)
|
||||
DockerContainerDetails(**result)
|
||||
|
||||
async def test_details_has_required_fields(self, _docker_mock: AsyncMock) -> None:
|
||||
@@ -411,7 +405,7 @@ class TestDockerDetailsContract:
|
||||
_docker_mock.return_value = {
|
||||
"docker": {"containers": [{"id": cid, "names": ["sonarr"], "state": "exited"}]}
|
||||
}
|
||||
result = await _docker_tool()(action="details", container_id=cid)
|
||||
result = await _docker_tool()(action="docker", subaction="details", container_id=cid)
|
||||
assert "id" in result
|
||||
assert "names" in result
|
||||
assert "state" in result
|
||||
@@ -424,7 +418,7 @@ class TestDockerNetworksContract:
|
||||
_docker_mock.return_value = {
|
||||
"docker": {"networks": [{"id": "net:1", "name": "bridge", "driver": "bridge"}]}
|
||||
}
|
||||
result = await _docker_tool()(action="networks")
|
||||
result = await _docker_tool()(action="docker", subaction="networks")
|
||||
DockerNetworkListResult(**result)
|
||||
|
||||
async def test_network_entries_conform_to_shape(self, _docker_mock: AsyncMock) -> None:
|
||||
@@ -436,13 +430,13 @@ class TestDockerNetworksContract:
|
||||
]
|
||||
}
|
||||
}
|
||||
result = await _docker_tool()(action="networks")
|
||||
result = await _docker_tool()(action="docker", subaction="networks")
|
||||
for net in result["networks"]:
|
||||
DockerNetwork(**net)
|
||||
|
||||
async def test_empty_networks_is_valid(self, _docker_mock: AsyncMock) -> None:
|
||||
_docker_mock.return_value = {"docker": {"networks": []}}
|
||||
result = await _docker_tool()(action="networks")
|
||||
result = await _docker_tool()(action="docker", subaction="networks")
|
||||
validated = DockerNetworkListResult(**result)
|
||||
assert validated.networks == []
|
||||
|
||||
@@ -456,10 +450,10 @@ class TestDockerMutationContract:
|
||||
{"docker": {"containers": [{"id": cid, "names": ["plex"]}]}},
|
||||
{"docker": {"start": {"id": cid, "names": ["plex"], "state": "running"}}},
|
||||
]
|
||||
result = await _docker_tool()(action="start", container_id=cid)
|
||||
result = await _docker_tool()(action="docker", subaction="start", container_id=cid)
|
||||
validated = DockerMutationResult(**result)
|
||||
assert validated.success is True
|
||||
assert validated.action == "start"
|
||||
assert validated.subaction == "start"
|
||||
|
||||
async def test_stop_mutation_result_shape(self, _docker_mock: AsyncMock) -> None:
|
||||
cid = "d" * 64 + ":local"
|
||||
@@ -467,10 +461,10 @@ class TestDockerMutationContract:
|
||||
{"docker": {"containers": [{"id": cid, "names": ["nginx"]}]}},
|
||||
{"docker": {"stop": {"id": cid, "names": ["nginx"], "state": "exited"}}},
|
||||
]
|
||||
result = await _docker_tool()(action="stop", container_id=cid)
|
||||
result = await _docker_tool()(action="docker", subaction="stop", container_id=cid)
|
||||
validated = DockerMutationResult(**result)
|
||||
assert validated.success is True
|
||||
assert validated.action == "stop"
|
||||
assert validated.subaction == "stop"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -501,7 +495,7 @@ class TestInfoOverviewContract:
|
||||
"memory": {"layout": []},
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="overview")
|
||||
result = await _info_tool()(action="system", subaction="overview")
|
||||
validated = InfoOverviewResult(**result)
|
||||
assert isinstance(validated.summary, dict)
|
||||
assert isinstance(validated.details, dict)
|
||||
@@ -521,7 +515,7 @@ class TestInfoOverviewContract:
|
||||
"memory": {"layout": []},
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="overview")
|
||||
result = await _info_tool()(action="system", subaction="overview")
|
||||
InfoOverviewSummary(**result["summary"])
|
||||
assert result["summary"]["hostname"] == "myserver"
|
||||
|
||||
@@ -538,7 +532,7 @@ class TestInfoOverviewContract:
|
||||
"memory": {"layout": []},
|
||||
}
|
||||
_info_mock.return_value = {"info": raw_info}
|
||||
result = await _info_tool()(action="overview")
|
||||
result = await _info_tool()(action="system", subaction="overview")
|
||||
assert result["details"] == raw_info
|
||||
|
||||
|
||||
@@ -557,7 +551,7 @@ class TestInfoArrayContract:
|
||||
"boot": None,
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="array")
|
||||
result = await _info_tool()(action="system", subaction="array")
|
||||
validated = InfoArrayResult(**result)
|
||||
assert isinstance(validated.summary, dict)
|
||||
assert isinstance(validated.details, dict)
|
||||
@@ -572,7 +566,7 @@ class TestInfoArrayContract:
|
||||
"caches": [],
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="array")
|
||||
result = await _info_tool()(action="system", subaction="array")
|
||||
ArraySummary(**result["summary"])
|
||||
|
||||
async def test_array_health_overall_healthy(self, _info_mock: AsyncMock) -> None:
|
||||
@@ -585,7 +579,7 @@ class TestInfoArrayContract:
|
||||
"caches": [],
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="array")
|
||||
result = await _info_tool()(action="system", subaction="array")
|
||||
assert result["summary"]["overall_health"] == "HEALTHY"
|
||||
|
||||
async def test_array_health_critical_with_failed_disk(self, _info_mock: AsyncMock) -> None:
|
||||
@@ -598,7 +592,7 @@ class TestInfoArrayContract:
|
||||
"caches": [],
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="array")
|
||||
result = await _info_tool()(action="system", subaction="array")
|
||||
assert result["summary"]["overall_health"] == "CRITICAL"
|
||||
|
||||
|
||||
@@ -619,7 +613,7 @@ class TestInfoMetricsContract:
|
||||
},
|
||||
}
|
||||
}
|
||||
result = await _info_tool()(action="metrics")
|
||||
result = await _info_tool()(action="system", subaction="metrics")
|
||||
validated = InfoMetricsResult(**result)
|
||||
assert validated.cpu is not None
|
||||
assert validated.memory is not None
|
||||
@@ -628,7 +622,7 @@ class TestInfoMetricsContract:
|
||||
_info_mock.return_value = {
|
||||
"metrics": {"cpu": {"percentTotal": 75.3}, "memory": {"percentTotal": 60.0}}
|
||||
}
|
||||
result = await _info_tool()(action="metrics")
|
||||
result = await _info_tool()(action="system", subaction="metrics")
|
||||
cpu_pct = result["cpu"]["percentTotal"]
|
||||
assert 0.0 <= cpu_pct <= 100.0
|
||||
|
||||
@@ -643,14 +637,14 @@ class TestInfoServicesContract:
|
||||
{"name": "docker", "online": True, "version": "24.0"},
|
||||
]
|
||||
}
|
||||
result = await _info_tool()(action="services")
|
||||
result = await _info_tool()(action="system", subaction="services")
|
||||
validated = InfoServicesResult(**result)
|
||||
for svc in validated.services:
|
||||
ServiceEntry(**svc)
|
||||
|
||||
async def test_services_empty_list_is_valid(self, _info_mock: AsyncMock) -> None:
|
||||
_info_mock.return_value = {"services": []}
|
||||
result = await _info_tool()(action="services")
|
||||
result = await _info_tool()(action="system", subaction="services")
|
||||
InfoServicesResult(**result)
|
||||
assert result["services"] == []
|
||||
|
||||
@@ -660,13 +654,13 @@ class TestInfoOnlineContract:
|
||||
|
||||
async def test_online_true_shape(self, _info_mock: AsyncMock) -> None:
|
||||
_info_mock.return_value = {"online": True}
|
||||
result = await _info_tool()(action="online")
|
||||
result = await _info_tool()(action="system", subaction="online")
|
||||
validated = InfoOnlineResult(**result)
|
||||
assert validated.online is True
|
||||
|
||||
async def test_online_false_shape(self, _info_mock: AsyncMock) -> None:
|
||||
_info_mock.return_value = {"online": False}
|
||||
result = await _info_tool()(action="online")
|
||||
result = await _info_tool()(action="system", subaction="online")
|
||||
validated = InfoOnlineResult(**result)
|
||||
assert validated.online is False
|
||||
|
||||
@@ -687,7 +681,7 @@ class TestInfoNetworkContract:
|
||||
],
|
||||
"vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"},
|
||||
}
|
||||
result = await _info_tool()(action="network")
|
||||
result = await _info_tool()(action="system", subaction="network")
|
||||
validated = InfoNetworkResult(**result)
|
||||
assert isinstance(validated.accessUrls, list)
|
||||
|
||||
@@ -696,7 +690,7 @@ class TestInfoNetworkContract:
|
||||
"servers": [],
|
||||
"vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"},
|
||||
}
|
||||
result = await _info_tool()(action="network")
|
||||
result = await _info_tool()(action="system", subaction="network")
|
||||
validated = InfoNetworkResult(**result)
|
||||
assert validated.accessUrls == []
|
||||
|
||||
@@ -716,21 +710,21 @@ class TestStorageSharesContract:
|
||||
{"id": "share:2", "name": "appdata", "free": 200000, "used": 50000, "size": 250000},
|
||||
]
|
||||
}
|
||||
result = await _storage_tool()(action="shares")
|
||||
result = await _storage_tool()(action="disk", subaction="shares")
|
||||
validated = StorageSharesResult(**result)
|
||||
for share in validated.shares:
|
||||
ShareEntry(**share)
|
||||
|
||||
async def test_shares_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
|
||||
_storage_mock.return_value = {"shares": []}
|
||||
result = await _storage_tool()(action="shares")
|
||||
result = await _storage_tool()(action="disk", subaction="shares")
|
||||
StorageSharesResult(**result)
|
||||
assert result["shares"] == []
|
||||
|
||||
async def test_shares_missing_name_fails_contract(self, _storage_mock: AsyncMock) -> None:
|
||||
"""A share without required 'name' must fail contract validation."""
|
||||
_storage_mock.return_value = {"shares": [{"id": "share:1", "free": 100}]}
|
||||
result = await _storage_tool()(action="shares")
|
||||
result = await _storage_tool()(action="disk", subaction="shares")
|
||||
with pytest.raises(ValidationError):
|
||||
ShareEntry(**result["shares"][0])
|
||||
|
||||
@@ -745,14 +739,14 @@ class TestStorageDisksContract:
|
||||
{"id": "disk:2", "device": "sdb", "name": "Seagate_8TB"},
|
||||
]
|
||||
}
|
||||
result = await _storage_tool()(action="disks")
|
||||
result = await _storage_tool()(action="disk", subaction="disks")
|
||||
validated = StorageDisksResult(**result)
|
||||
for disk in validated.disks:
|
||||
DiskEntry(**disk)
|
||||
|
||||
async def test_disks_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
|
||||
_storage_mock.return_value = {"disks": []}
|
||||
result = await _storage_tool()(action="disks")
|
||||
result = await _storage_tool()(action="disk", subaction="disks")
|
||||
StorageDisksResult(**result)
|
||||
assert result["disks"] == []
|
||||
|
||||
@@ -771,7 +765,7 @@ class TestStorageDiskDetailsContract:
|
||||
"temperature": 35,
|
||||
}
|
||||
}
|
||||
result = await _storage_tool()(action="disk_details", disk_id="disk:1")
|
||||
result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:1")
|
||||
validated = StorageDiskDetailsResult(**result)
|
||||
assert isinstance(validated.summary, dict)
|
||||
assert isinstance(validated.details, dict)
|
||||
@@ -787,7 +781,7 @@ class TestStorageDiskDetailsContract:
|
||||
"temperature": 40,
|
||||
}
|
||||
}
|
||||
result = await _storage_tool()(action="disk_details", disk_id="disk:2")
|
||||
result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:2")
|
||||
DiskDetailsSummary(**result["summary"])
|
||||
|
||||
async def test_disk_details_temperature_formatted(self, _storage_mock: AsyncMock) -> None:
|
||||
@@ -801,7 +795,7 @@ class TestStorageDiskDetailsContract:
|
||||
"temperature": 38,
|
||||
}
|
||||
}
|
||||
result = await _storage_tool()(action="disk_details", disk_id="disk:3")
|
||||
result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:3")
|
||||
assert "°C" in result["summary"]["temperature"]
|
||||
|
||||
async def test_disk_details_no_temperature_shows_na(self, _storage_mock: AsyncMock) -> None:
|
||||
@@ -815,7 +809,7 @@ class TestStorageDiskDetailsContract:
|
||||
"temperature": None,
|
||||
}
|
||||
}
|
||||
result = await _storage_tool()(action="disk_details", disk_id="disk:4")
|
||||
result = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:4")
|
||||
assert result["summary"]["temperature"] == "N/A"
|
||||
|
||||
|
||||
@@ -839,14 +833,14 @@ class TestStorageLogFilesContract:
|
||||
},
|
||||
]
|
||||
}
|
||||
result = await _storage_tool()(action="log_files")
|
||||
result = await _storage_tool()(action="disk", subaction="log_files")
|
||||
validated = StorageLogFilesResult(**result)
|
||||
for log_file in validated.log_files:
|
||||
LogFileEntry(**log_file)
|
||||
|
||||
async def test_log_files_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
|
||||
_storage_mock.return_value = {"logFiles": []}
|
||||
result = await _storage_tool()(action="log_files")
|
||||
result = await _storage_tool()(action="disk", subaction="log_files")
|
||||
StorageLogFilesResult(**result)
|
||||
assert result["log_files"] == []
|
||||
|
||||
@@ -868,7 +862,7 @@ class TestNotificationsOverviewContract:
|
||||
}
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(action="overview")
|
||||
result = await _notifications_tool()(action="notification", subaction="overview")
|
||||
validated = NotificationOverviewResult(**result)
|
||||
assert validated.unread is not None
|
||||
assert validated.archive is not None
|
||||
@@ -882,7 +876,7 @@ class TestNotificationsOverviewContract:
|
||||
}
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(action="overview")
|
||||
result = await _notifications_tool()(action="notification", subaction="overview")
|
||||
NotificationCountBucket(**result["unread"])
|
||||
NotificationCountBucket(**result["archive"])
|
||||
|
||||
@@ -895,7 +889,7 @@ class TestNotificationsOverviewContract:
|
||||
}
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(action="overview")
|
||||
result = await _notifications_tool()(action="notification", subaction="overview")
|
||||
NotificationOverviewResult(**result)
|
||||
|
||||
|
||||
@@ -920,14 +914,14 @@ class TestNotificationsListContract:
|
||||
]
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(action="list")
|
||||
result = await _notifications_tool()(action="notification", subaction="list")
|
||||
validated = NotificationListResult(**result)
|
||||
for notif in validated.notifications:
|
||||
NotificationEntry(**notif)
|
||||
|
||||
async def test_list_empty_notifications_valid(self, _notifications_mock: AsyncMock) -> None:
|
||||
_notifications_mock.return_value = {"notifications": {"list": []}}
|
||||
result = await _notifications_tool()(action="list")
|
||||
result = await _notifications_tool()(action="notification", subaction="list")
|
||||
NotificationListResult(**result)
|
||||
assert result["notifications"] == []
|
||||
|
||||
@@ -938,7 +932,7 @@ class TestNotificationsListContract:
|
||||
_notifications_mock.return_value = {
|
||||
"notifications": {"list": [{"title": "No ID here", "importance": "INFO"}]}
|
||||
}
|
||||
result = await _notifications_tool()(action="list")
|
||||
result = await _notifications_tool()(action="notification", subaction="list")
|
||||
with pytest.raises(ValidationError):
|
||||
NotificationEntry(**result["notifications"][0])
|
||||
|
||||
@@ -955,7 +949,8 @@ class TestNotificationsCreateContract:
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test notification",
|
||||
subject="Test subject",
|
||||
description="This is a test",
|
||||
@@ -970,7 +965,8 @@ class TestNotificationsCreateContract:
|
||||
"createNotification": {"id": "notif:42", "title": "Alert!", "importance": "ALERT"}
|
||||
}
|
||||
result = await _notifications_tool()(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Alert!",
|
||||
subject="Critical issue",
|
||||
description="Something went wrong",
|
||||
|
||||
@@ -261,11 +261,11 @@ class TestGraphQLErrorHandling:
|
||||
|
||||
|
||||
class TestInfoToolRequests:
|
||||
"""Verify unraid_info tool constructs correct GraphQL queries."""
|
||||
"""Verify unraid system tool constructs correct GraphQL queries."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_overview_sends_correct_query(self) -> None:
|
||||
@@ -281,7 +281,7 @@ class TestInfoToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="overview")
|
||||
await tool(action="system", subaction="overview")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetSystemInfo" in body["query"]
|
||||
assert "info" in body["query"]
|
||||
@@ -292,7 +292,7 @@ class TestInfoToolRequests:
|
||||
return_value=_graphql_response({"array": {"state": "STARTED", "capacity": {}}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="array")
|
||||
await tool(action="system", subaction="array")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetArrayStatus" in body["query"]
|
||||
|
||||
@@ -302,7 +302,7 @@ class TestInfoToolRequests:
|
||||
return_value=_graphql_response({"network": {"id": "n1", "accessUrls": []}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="network")
|
||||
await tool(action="system", subaction="network")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetNetworkInfo" in body["query"]
|
||||
|
||||
@@ -314,7 +314,7 @@ class TestInfoToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="metrics")
|
||||
await tool(action="system", subaction="metrics")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetMetrics" in body["query"]
|
||||
|
||||
@@ -324,7 +324,7 @@ class TestInfoToolRequests:
|
||||
return_value=_graphql_response({"upsDeviceById": {"id": "ups1", "model": "APC"}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="ups_device", device_id="ups1")
|
||||
await tool(action="system", subaction="ups_device", device_id="ups1")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert body["variables"] == {"id": "ups1"}
|
||||
assert "GetUpsDevice" in body["query"]
|
||||
@@ -333,7 +333,7 @@ class TestInfoToolRequests:
|
||||
async def test_online_sends_correct_query(self) -> None:
|
||||
route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True}))
|
||||
tool = self._get_tool()
|
||||
await tool(action="online")
|
||||
await tool(action="system", subaction="online")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetOnline" in body["query"]
|
||||
|
||||
@@ -343,7 +343,7 @@ class TestInfoToolRequests:
|
||||
return_value=_graphql_response({"servers": [{"id": "s1", "name": "tower"}]})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="servers")
|
||||
await tool(action="system", subaction="servers")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetServers" in body["query"]
|
||||
|
||||
@@ -353,7 +353,7 @@ class TestInfoToolRequests:
|
||||
return_value=_graphql_response({"flash": {"id": "f1", "guid": "abc"}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="flash")
|
||||
await tool(action="system", subaction="flash")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetFlash" in body["query"]
|
||||
|
||||
@@ -364,11 +364,11 @@ class TestInfoToolRequests:
|
||||
|
||||
|
||||
class TestDockerToolRequests:
|
||||
"""Verify unraid_docker tool constructs correct requests."""
|
||||
"""Verify unraid docker tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_list_sends_correct_query(self) -> None:
|
||||
@@ -378,7 +378,7 @@ class TestDockerToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="list")
|
||||
await tool(action="docker", subaction="list")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListDockerContainers" in body["query"]
|
||||
|
||||
@@ -400,7 +400,7 @@ class TestDockerToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="start", container_id=container_id)
|
||||
await tool(action="docker", subaction="start", container_id=container_id)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "StartContainer" in body["query"]
|
||||
assert body["variables"] == {"id": container_id}
|
||||
@@ -423,7 +423,7 @@ class TestDockerToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="stop", container_id=container_id)
|
||||
await tool(action="docker", subaction="stop", container_id=container_id)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "StopContainer" in body["query"]
|
||||
assert body["variables"] == {"id": container_id}
|
||||
@@ -440,7 +440,7 @@ class TestDockerToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="networks")
|
||||
await tool(action="docker", subaction="networks")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetDockerNetworks" in body["query"]
|
||||
|
||||
@@ -484,9 +484,9 @@ class TestDockerToolRequests:
|
||||
|
||||
respx.post(API_URL).mock(side_effect=side_effect)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="restart", container_id=container_id)
|
||||
result = await tool(action="docker", subaction="restart", container_id=container_id)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "restart"
|
||||
assert result["subaction"] == "restart"
|
||||
assert call_count == 2
|
||||
|
||||
@respx.mock
|
||||
@@ -499,7 +499,8 @@ class TestDockerToolRequests:
|
||||
nonlocal call_count
|
||||
body = json.loads(request.content.decode())
|
||||
call_count += 1
|
||||
if "ResolveContainerID" in body["query"]:
|
||||
if "skipCache" in body["query"]:
|
||||
# Resolution query: docker { containers(skipCache: true) { id names } }
|
||||
return _graphql_response(
|
||||
{"docker": {"containers": [{"id": resolved_id, "names": ["plex"]}]}}
|
||||
)
|
||||
@@ -520,7 +521,7 @@ class TestDockerToolRequests:
|
||||
|
||||
respx.post(API_URL).mock(side_effect=side_effect)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="start", container_id="plex")
|
||||
result = await tool(action="docker", subaction="start", container_id="plex")
|
||||
assert call_count == 2 # resolve + start
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -531,11 +532,11 @@ class TestDockerToolRequests:
|
||||
|
||||
|
||||
class TestVMToolRequests:
|
||||
"""Verify unraid_vm tool constructs correct requests."""
|
||||
"""Verify unraid vm tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_list_sends_correct_query(self) -> None:
|
||||
@@ -549,7 +550,7 @@ class TestVMToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="list")
|
||||
result = await tool(action="vm", subaction="list")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListVMs" in body["query"]
|
||||
assert "vms" in result
|
||||
@@ -558,7 +559,7 @@ class TestVMToolRequests:
|
||||
async def test_start_sends_mutation_with_id(self) -> None:
|
||||
route = respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"start": True}}))
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="start", vm_id="vm-123")
|
||||
result = await tool(action="vm", subaction="start", vm_id="vm-123")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "StartVM" in body["query"]
|
||||
assert body["variables"] == {"id": "vm-123"}
|
||||
@@ -568,7 +569,7 @@ class TestVMToolRequests:
|
||||
async def test_stop_sends_mutation_with_id(self) -> None:
|
||||
route = respx.post(API_URL).mock(return_value=_graphql_response({"vm": {"stop": True}}))
|
||||
tool = self._get_tool()
|
||||
await tool(action="stop", vm_id="vm-456")
|
||||
await tool(action="vm", subaction="stop", vm_id="vm-456")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "StopVM" in body["query"]
|
||||
assert body["variables"] == {"id": "vm-456"}
|
||||
@@ -576,8 +577,8 @@ class TestVMToolRequests:
|
||||
@respx.mock
|
||||
async def test_force_stop_requires_confirm(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool(action="force_stop", vm_id="vm-789")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool(action="vm", subaction="force_stop", vm_id="vm-789")
|
||||
|
||||
@respx.mock
|
||||
async def test_force_stop_sends_mutation_when_confirmed(self) -> None:
|
||||
@@ -585,7 +586,7 @@ class TestVMToolRequests:
|
||||
return_value=_graphql_response({"vm": {"forceStop": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="force_stop", vm_id="vm-789", confirm=True)
|
||||
result = await tool(action="vm", subaction="force_stop", vm_id="vm-789", confirm=True)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ForceStopVM" in body["query"]
|
||||
assert result["success"] is True
|
||||
@@ -593,8 +594,8 @@ class TestVMToolRequests:
|
||||
@respx.mock
|
||||
async def test_reset_requires_confirm(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool(action="reset", vm_id="vm-abc")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool(action="vm", subaction="reset", vm_id="vm-abc")
|
||||
|
||||
@respx.mock
|
||||
async def test_details_finds_vm_by_name(self) -> None:
|
||||
@@ -611,7 +612,7 @@ class TestVMToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="details", vm_id="ubuntu")
|
||||
result = await tool(action="vm", subaction="details", vm_id="ubuntu")
|
||||
assert result["name"] == "ubuntu"
|
||||
|
||||
|
||||
@@ -621,11 +622,11 @@ class TestVMToolRequests:
|
||||
|
||||
|
||||
class TestArrayToolRequests:
|
||||
"""Verify unraid_array tool constructs correct requests."""
|
||||
"""Verify unraid array tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_parity_status_sends_correct_query(self) -> None:
|
||||
@@ -643,7 +644,7 @@ class TestArrayToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="parity_status")
|
||||
result = await tool(action="array", subaction="parity_status")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetParityStatus" in body["query"]
|
||||
assert result["success"] is True
|
||||
@@ -654,7 +655,7 @@ class TestArrayToolRequests:
|
||||
return_value=_graphql_response({"parityCheck": {"start": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="parity_start", correct=False)
|
||||
result = await tool(action="array", subaction="parity_start", correct=False)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "StartParityCheck" in body["query"]
|
||||
assert body["variables"] == {"correct": False}
|
||||
@@ -666,7 +667,7 @@ class TestArrayToolRequests:
|
||||
return_value=_graphql_response({"parityCheck": {"start": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="parity_start", correct=True)
|
||||
await tool(action="array", subaction="parity_start", correct=True)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert body["variables"] == {"correct": True}
|
||||
|
||||
@@ -676,7 +677,7 @@ class TestArrayToolRequests:
|
||||
return_value=_graphql_response({"parityCheck": {"pause": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="parity_pause")
|
||||
await tool(action="array", subaction="parity_pause")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "PauseParityCheck" in body["query"]
|
||||
|
||||
@@ -686,7 +687,7 @@ class TestArrayToolRequests:
|
||||
return_value=_graphql_response({"parityCheck": {"cancel": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="parity_cancel")
|
||||
await tool(action="array", subaction="parity_cancel")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "CancelParityCheck" in body["query"]
|
||||
|
||||
@@ -697,11 +698,11 @@ class TestArrayToolRequests:
|
||||
|
||||
|
||||
class TestStorageToolRequests:
|
||||
"""Verify unraid_storage tool constructs correct requests."""
|
||||
"""Verify unraid disk tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_shares_sends_correct_query(self) -> None:
|
||||
@@ -709,7 +710,7 @@ class TestStorageToolRequests:
|
||||
return_value=_graphql_response({"shares": [{"id": "s1", "name": "appdata"}]})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="shares")
|
||||
result = await tool(action="disk", subaction="shares")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetSharesInfo" in body["query"]
|
||||
assert "shares" in result
|
||||
@@ -722,7 +723,7 @@ class TestStorageToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="disks")
|
||||
await tool(action="disk", subaction="disks")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListPhysicalDisks" in body["query"]
|
||||
|
||||
@@ -743,7 +744,7 @@ class TestStorageToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="disk_details", disk_id="d1")
|
||||
await tool(action="disk", subaction="disk_details", disk_id="d1")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetDiskDetails" in body["query"]
|
||||
assert body["variables"] == {"id": "d1"}
|
||||
@@ -756,7 +757,7 @@ class TestStorageToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="log_files")
|
||||
result = await tool(action="disk", subaction="log_files")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListLogFiles" in body["query"]
|
||||
assert "log_files" in result
|
||||
@@ -776,7 +777,7 @@ class TestStorageToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="logs", log_path="/var/log/syslog", tail_lines=50)
|
||||
await tool(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=50)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetLogContent" in body["query"]
|
||||
assert body["variables"]["path"] == "/var/log/syslog"
|
||||
@@ -786,7 +787,7 @@ class TestStorageToolRequests:
|
||||
async def test_logs_rejects_path_traversal(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool(action="logs", log_path="/etc/shadow")
|
||||
await tool(action="disk", subaction="logs", log_path="/etc/shadow")
|
||||
|
||||
|
||||
# ===========================================================================
|
||||
@@ -795,15 +796,11 @@ class TestStorageToolRequests:
|
||||
|
||||
|
||||
class TestNotificationsToolRequests:
|
||||
"""Verify unraid_notifications tool constructs correct requests."""
|
||||
"""Verify unraid notification tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_overview_sends_correct_query(self) -> None:
|
||||
@@ -819,7 +816,7 @@ class TestNotificationsToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="overview")
|
||||
await tool(action="notification", subaction="overview")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetNotificationsOverview" in body["query"]
|
||||
|
||||
@@ -829,7 +826,14 @@ class TestNotificationsToolRequests:
|
||||
return_value=_graphql_response({"notifications": {"list": []}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="list", list_type="ARCHIVE", importance="WARNING", offset=5, limit=10)
|
||||
await tool(
|
||||
action="notification",
|
||||
subaction="list",
|
||||
list_type="ARCHIVE",
|
||||
importance="WARNING",
|
||||
offset=5,
|
||||
limit=10,
|
||||
)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListNotifications" in body["query"]
|
||||
filt = body["variables"]["filter"]
|
||||
@@ -853,7 +857,8 @@ class TestNotificationsToolRequests:
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test",
|
||||
subject="Sub",
|
||||
description="Desc",
|
||||
@@ -872,7 +877,7 @@ class TestNotificationsToolRequests:
|
||||
return_value=_graphql_response({"archiveNotification": {"id": "notif-1"}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="archive", notification_id="notif-1")
|
||||
await tool(action="notification", subaction="archive", notification_id="notif-1")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ArchiveNotification" in body["query"]
|
||||
assert body["variables"] == {"id": "notif-1"}
|
||||
@@ -880,8 +885,13 @@ class TestNotificationsToolRequests:
|
||||
@respx.mock
|
||||
async def test_delete_requires_confirm(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool(action="delete", notification_id="n1", notification_type="UNREAD")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool(
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n1",
|
||||
notification_type="UNREAD",
|
||||
)
|
||||
|
||||
@respx.mock
|
||||
async def test_delete_sends_id_and_type(self) -> None:
|
||||
@@ -890,7 +900,8 @@ class TestNotificationsToolRequests:
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n1",
|
||||
notification_type="unread",
|
||||
confirm=True,
|
||||
@@ -906,7 +917,7 @@ class TestNotificationsToolRequests:
|
||||
return_value=_graphql_response({"archiveAll": {"archive": {"total": 1}}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="archive_all", importance="warning")
|
||||
await tool(action="notification", subaction="archive_all", importance="warning")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ArchiveAllNotifications" in body["query"]
|
||||
assert body["variables"]["importance"] == "WARNING"
|
||||
@@ -918,11 +929,11 @@ class TestNotificationsToolRequests:
|
||||
|
||||
|
||||
class TestRCloneToolRequests:
|
||||
"""Verify unraid_rclone tool constructs correct requests."""
|
||||
"""Verify unraid rclone tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_list_remotes_sends_correct_query(self) -> None:
|
||||
@@ -932,7 +943,7 @@ class TestRCloneToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="list_remotes")
|
||||
result = await tool(action="rclone", subaction="list_remotes")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListRCloneRemotes" in body["query"]
|
||||
assert "remotes" in result
|
||||
@@ -953,7 +964,7 @@ class TestRCloneToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="config_form", provider_type="s3")
|
||||
await tool(action="rclone", subaction="config_form", provider_type="s3")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetRCloneConfigForm" in body["query"]
|
||||
assert body["variables"]["formOptions"]["providerType"] == "s3"
|
||||
@@ -975,7 +986,8 @@ class TestRCloneToolRequests:
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="my-s3",
|
||||
provider_type="s3",
|
||||
config_data={"bucket": "my-bucket"},
|
||||
@@ -990,8 +1002,8 @@ class TestRCloneToolRequests:
|
||||
@respx.mock
|
||||
async def test_delete_remote_requires_confirm(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool(action="delete_remote", name="old-remote")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool(action="rclone", subaction="delete_remote", name="old-remote")
|
||||
|
||||
@respx.mock
|
||||
async def test_delete_remote_sends_name_when_confirmed(self) -> None:
|
||||
@@ -999,7 +1011,9 @@ class TestRCloneToolRequests:
|
||||
return_value=_graphql_response({"rclone": {"deleteRCloneRemote": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="delete_remote", name="old-remote", confirm=True)
|
||||
result = await tool(
|
||||
action="rclone", subaction="delete_remote", name="old-remote", confirm=True
|
||||
)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "DeleteRCloneRemote" in body["query"]
|
||||
assert body["variables"]["input"]["name"] == "old-remote"
|
||||
@@ -1012,11 +1026,11 @@ class TestRCloneToolRequests:
|
||||
|
||||
|
||||
class TestUsersToolRequests:
|
||||
"""Verify unraid_users tool constructs correct requests."""
|
||||
"""Verify unraid user tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_me_sends_correct_query(self) -> None:
|
||||
@@ -1033,7 +1047,7 @@ class TestUsersToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="me")
|
||||
result = await tool(action="user", subaction="me")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetMe" in body["query"]
|
||||
assert result["name"] == "admin"
|
||||
@@ -1045,11 +1059,11 @@ class TestUsersToolRequests:
|
||||
|
||||
|
||||
class TestKeysToolRequests:
|
||||
"""Verify unraid_keys tool constructs correct requests."""
|
||||
"""Verify unraid key tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_list_sends_correct_query(self) -> None:
|
||||
@@ -1057,7 +1071,7 @@ class TestKeysToolRequests:
|
||||
return_value=_graphql_response({"apiKeys": [{"id": "k1", "name": "my-key"}]})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="list")
|
||||
result = await tool(action="key", subaction="list")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ListApiKeys" in body["query"]
|
||||
assert "keys" in result
|
||||
@@ -1070,7 +1084,7 @@ class TestKeysToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="get", key_id="k1")
|
||||
await tool(action="key", subaction="get", key_id="k1")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "GetApiKey" in body["query"]
|
||||
assert body["variables"] == {"id": "k1"}
|
||||
@@ -1092,7 +1106,7 @@ class TestKeysToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="create", name="new-key", roles=["read"])
|
||||
result = await tool(action="key", subaction="create", name="new-key", roles=["read"])
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "CreateApiKey" in body["query"]
|
||||
inp = body["variables"]["input"]
|
||||
@@ -1108,7 +1122,7 @@ class TestKeysToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
await tool(action="update", key_id="k1", name="renamed")
|
||||
await tool(action="key", subaction="update", key_id="k1", name="renamed")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "UpdateApiKey" in body["query"]
|
||||
inp = body["variables"]["input"]
|
||||
@@ -1119,7 +1133,7 @@ class TestKeysToolRequests:
|
||||
async def test_delete_requires_confirm(self) -> None:
|
||||
tool = self._get_tool()
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool(action="delete", key_id="k1")
|
||||
await tool(action="key", subaction="delete", key_id="k1")
|
||||
|
||||
@respx.mock
|
||||
async def test_delete_sends_ids_when_confirmed(self) -> None:
|
||||
@@ -1127,7 +1141,7 @@ class TestKeysToolRequests:
|
||||
return_value=_graphql_response({"apiKey": {"delete": True}})
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="delete", key_id="k1", confirm=True)
|
||||
result = await tool(action="key", subaction="delete", key_id="k1", confirm=True)
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "DeleteApiKey" in body["query"]
|
||||
assert body["variables"]["input"]["ids"] == ["k1"]
|
||||
@@ -1140,17 +1154,17 @@ class TestKeysToolRequests:
|
||||
|
||||
|
||||
class TestHealthToolRequests:
|
||||
"""Verify unraid_health tool constructs correct requests."""
|
||||
"""Verify unraid health tool constructs correct requests."""
|
||||
|
||||
@staticmethod
|
||||
def _get_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
@respx.mock
|
||||
async def test_test_connection_sends_online_query(self) -> None:
|
||||
route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True}))
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="test_connection")
|
||||
result = await tool(action="health", subaction="test_connection")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "online" in body["query"]
|
||||
assert result["status"] == "connected"
|
||||
@@ -1178,7 +1192,7 @@ class TestHealthToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="check")
|
||||
result = await tool(action="health", subaction="check")
|
||||
body = _extract_request_body(route.calls.last.request)
|
||||
assert "ComprehensiveHealthCheck" in body["query"]
|
||||
assert result["status"] == "healthy"
|
||||
@@ -1188,7 +1202,7 @@ class TestHealthToolRequests:
|
||||
async def test_test_connection_measures_latency(self) -> None:
|
||||
respx.post(API_URL).mock(return_value=_graphql_response({"online": True}))
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="test_connection")
|
||||
result = await tool(action="health", subaction="test_connection")
|
||||
assert "latency_ms" in result
|
||||
assert isinstance(result["latency_ms"], float)
|
||||
|
||||
@@ -1212,7 +1226,7 @@ class TestHealthToolRequests:
|
||||
)
|
||||
)
|
||||
tool = self._get_tool()
|
||||
result = await tool(action="check")
|
||||
result = await tool(action="health", subaction="check")
|
||||
assert result["status"] == "warning"
|
||||
assert any("alert" in issue for issue in result.get("issues", []))
|
||||
|
||||
@@ -1249,17 +1263,17 @@ class TestCrossCuttingConcerns:
|
||||
async def test_tool_error_from_http_layer_propagates(self) -> None:
|
||||
"""When an HTTP error occurs, the ToolError bubbles up through the tool."""
|
||||
respx.post(API_URL).mock(return_value=httpx.Response(500, text="Server Error"))
|
||||
tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
with pytest.raises(ToolError, match="Unraid API returned HTTP 500"):
|
||||
await tool(action="online")
|
||||
await tool(action="system", subaction="online")
|
||||
|
||||
@respx.mock
|
||||
async def test_network_error_propagates_through_tool(self) -> None:
|
||||
"""When a network error occurs, the ToolError bubbles up through the tool."""
|
||||
respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused"))
|
||||
tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
with pytest.raises(ToolError, match="Network error connecting to Unraid API"):
|
||||
await tool(action="online")
|
||||
await tool(action="system", subaction="online")
|
||||
|
||||
@respx.mock
|
||||
async def test_graphql_error_propagates_through_tool(self) -> None:
|
||||
@@ -1267,6 +1281,6 @@ class TestCrossCuttingConcerns:
|
||||
respx.post(API_URL).mock(
|
||||
return_value=_graphql_response(errors=[{"message": "Permission denied"}])
|
||||
)
|
||||
tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
tool = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
with pytest.raises(ToolError, match="Permission denied"):
|
||||
await tool(action="online")
|
||||
await tool(action="system", subaction="online")
|
||||
|
||||
@@ -816,6 +816,15 @@ class TestAutoStart:
|
||||
|
||||
async def test_auto_start_only_starts_marked_subscriptions(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
# Clear default SNAPSHOT_ACTIONS configs; add one with auto_start=False
|
||||
# to verify that unmarked subscriptions are never started.
|
||||
mgr.subscription_configs.clear()
|
||||
mgr.subscription_configs["no_auto_sub"] = {
|
||||
"query": "subscription { test }",
|
||||
"resource": "unraid://test",
|
||||
"description": "Unmarked sub",
|
||||
"auto_start": False,
|
||||
}
|
||||
with patch.object(mgr, "start_subscription", new_callable=AsyncMock) as mock_start:
|
||||
await mgr.auto_start_all_subscriptions()
|
||||
mock_start.assert_not_called()
|
||||
@@ -837,6 +846,7 @@ class TestAutoStart:
|
||||
|
||||
async def test_auto_start_calls_start_for_marked(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.subscription_configs.clear()
|
||||
mgr.subscription_configs["auto_sub"] = {
|
||||
"query": "subscription { auto }",
|
||||
"resource": "unraid://auto",
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
# =============================================================================
|
||||
# test-tools.sh — Integration smoke-test for unraid-mcp MCP server tools
|
||||
#
|
||||
# Exercises every non-destructive action across all 10 tools using mcporter.
|
||||
# The server is launched ad-hoc via mcporter's --stdio flag so no persistent
|
||||
# process or registered server entry is required.
|
||||
# Exercises broad non-destructive smoke coverage of the consolidated `unraid` tool
|
||||
# (action + subaction pattern). The server is launched ad-hoc via mcporter's
|
||||
# --stdio flag so no persistent process or registered server entry is required.
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/test-tools.sh [--timeout-ms N] [--parallel] [--verbose]
|
||||
# ./tests/mcporter/test-tools.sh [--timeout-ms N] [--parallel] [--verbose]
|
||||
#
|
||||
# Options:
|
||||
# --timeout-ms N Per-call timeout in milliseconds (default: 25000)
|
||||
@@ -134,6 +134,11 @@ check_prerequisites() {
|
||||
missing=true
|
||||
fi
|
||||
|
||||
if ! command -v jq &>/dev/null; then
|
||||
log_error "jq not found in PATH. Install it and re-run."
|
||||
missing=true
|
||||
fi
|
||||
|
||||
if [[ ! -f "${PROJECT_DIR}/pyproject.toml" ]]; then
|
||||
log_error "pyproject.toml not found at ${PROJECT_DIR}. Wrong directory?"
|
||||
missing=true
|
||||
@@ -146,9 +151,8 @@ check_prerequisites() {
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Server startup smoke-test
|
||||
# Launches the stdio server and calls unraid_health action=check.
|
||||
# Returns 0 if the server responds (even with an API error — that still
|
||||
# means the Python process started cleanly), non-zero on import failure.
|
||||
# Launches the stdio server and calls unraid action=health subaction=check.
|
||||
# Returns 0 if the server responds, non-zero on import failure.
|
||||
# ---------------------------------------------------------------------------
|
||||
smoke_test_server() {
|
||||
log_info "Smoke-testing server startup..."
|
||||
@@ -159,14 +163,13 @@ smoke_test_server() {
|
||||
--stdio "uv run unraid-mcp-server" \
|
||||
--cwd "${PROJECT_DIR}" \
|
||||
--name "unraid-smoke" \
|
||||
--tool unraid_health \
|
||||
--args '{"action":"check"}' \
|
||||
--tool unraid \
|
||||
--args '{"action":"health","subaction":"check"}' \
|
||||
--timeout 30000 \
|
||||
--output json \
|
||||
2>&1
|
||||
)" || true
|
||||
|
||||
# If mcporter returns the offline error the server failed to import/start
|
||||
if printf '%s' "${output}" | grep -q '"kind": "offline"'; then
|
||||
log_error "Server failed to start. Output:"
|
||||
printf '%s\n' "${output}" >&2
|
||||
@@ -177,18 +180,18 @@ smoke_test_server() {
|
||||
return 2
|
||||
fi
|
||||
|
||||
# Assert the response contains a valid tool response field, not a bare JSON error.
|
||||
# unraid_health action=check always returns {"status": ...} on success.
|
||||
local key_check
|
||||
key_check="$(
|
||||
printf '%s' "${output}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
d = json.load(sys.stdin)
|
||||
if 'status' in d or 'success' in d or 'error' in d:
|
||||
if 'error' in d:
|
||||
print('error: tool returned error key — ' + str(d.get('error', '')))
|
||||
elif 'status' in d or 'success' in d:
|
||||
print('ok')
|
||||
else:
|
||||
print('missing: no status/success/error key in response')
|
||||
print('missing: no status/success key in response')
|
||||
except Exception as e:
|
||||
print('parse_error: ' + str(e))
|
||||
" 2>/dev/null
|
||||
@@ -206,19 +209,17 @@ except Exception as e:
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# mcporter call wrapper
|
||||
# Usage: mcporter_call <tool_name> <args_json>
|
||||
# Writes the mcporter JSON output to stdout.
|
||||
# Returns the mcporter exit code.
|
||||
# Usage: mcporter_call <args_json>
|
||||
# All calls go to the single `unraid` tool.
|
||||
# ---------------------------------------------------------------------------
|
||||
mcporter_call() {
|
||||
local tool_name="${1:?tool_name required}"
|
||||
local args_json="${2:?args_json required}"
|
||||
local args_json="${1:?args_json required}"
|
||||
|
||||
mcporter call \
|
||||
--stdio "uv run unraid-mcp-server" \
|
||||
--cwd "${PROJECT_DIR}" \
|
||||
--name "unraid" \
|
||||
--tool "${tool_name}" \
|
||||
--tool unraid \
|
||||
--args "${args_json}" \
|
||||
--timeout "${CALL_TIMEOUT_MS}" \
|
||||
--output json \
|
||||
@@ -227,25 +228,18 @@ mcporter_call() {
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test runner
|
||||
# Usage: run_test <label> <tool_name> <args_json> [expected_key]
|
||||
#
|
||||
# expected_key — optional jq-style python key path to validate in the
|
||||
# response (e.g. ".status" or ".containers"). If omitted,
|
||||
# any non-offline response is a PASS (tool errors from the
|
||||
# API — e.g. VMs disabled — are still considered PASS because
|
||||
# the tool itself responded correctly).
|
||||
# Usage: run_test <label> <args_json> [expected_key]
|
||||
# ---------------------------------------------------------------------------
|
||||
run_test() {
|
||||
local label="${1:?label required}"
|
||||
local tool="${2:?tool required}"
|
||||
local args="${3:?args required}"
|
||||
local expected_key="${4:-}"
|
||||
local args="${2:?args required}"
|
||||
local expected_key="${3:-}"
|
||||
|
||||
local t0
|
||||
t0="$(date +%s%N)"
|
||||
|
||||
local output
|
||||
output="$(mcporter_call "${tool}" "${args}" 2>&1)" || true
|
||||
output="$(mcporter_call "${args}" 2>&1)" || true
|
||||
|
||||
local elapsed_ms
|
||||
elapsed_ms="$(( ( $(date +%s%N) - t0 ) / 1000000 ))"
|
||||
@@ -266,6 +260,31 @@ run_test() {
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Always validate JSON is parseable and not an error payload
|
||||
local json_check
|
||||
json_check="$(
|
||||
printf '%s' "${output}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
d = json.load(sys.stdin)
|
||||
if isinstance(d, dict) and ('error' in d or d.get('kind') == 'error'):
|
||||
print('error: ' + str(d.get('error', d.get('message', 'unknown error'))))
|
||||
else:
|
||||
print('ok')
|
||||
except Exception as e:
|
||||
print('invalid_json: ' + str(e))
|
||||
" 2>/dev/null
|
||||
)" || json_check="parse_error"
|
||||
|
||||
if [[ "${json_check}" != "ok" ]]; then
|
||||
printf "${C_RED}[FAIL]${C_RESET} %-55s ${C_DIM}%dms${C_RESET}\n" \
|
||||
"${label}" "${elapsed_ms}" | tee -a "${LOG_FILE}"
|
||||
printf ' response validation failed: %s\n' "${json_check}" | tee -a "${LOG_FILE}"
|
||||
FAIL_COUNT=$(( FAIL_COUNT + 1 ))
|
||||
FAIL_NAMES+=("${label}")
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Validate optional key presence
|
||||
if [[ -n "${expected_key}" ]]; then
|
||||
local key_check
|
||||
@@ -302,7 +321,7 @@ except Exception as e:
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Skip helper — use when a prerequisite (like a list) returned empty
|
||||
# Skip helper
|
||||
# ---------------------------------------------------------------------------
|
||||
skip_test() {
|
||||
local label="${1:?label required}"
|
||||
@@ -311,16 +330,31 @@ skip_test() {
|
||||
SKIP_COUNT=$(( SKIP_COUNT + 1 ))
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Safe JSON payload builder
|
||||
# Usage: _json_payload '<jq-template-with-$vars>' key1=value1 key2=value2 ...
|
||||
# Uses jq --arg to safely encode shell values into JSON, preventing injection
|
||||
# via special characters in variable values (e.g., quotes, backslashes).
|
||||
# ---------------------------------------------------------------------------
|
||||
_json_payload() {
|
||||
local template="${1:?template required}"; shift
|
||||
local jq_args=()
|
||||
local pair k v
|
||||
for pair in "$@"; do
|
||||
k="${pair%%=*}"
|
||||
v="${pair#*=}"
|
||||
jq_args+=(--arg "$k" "$v")
|
||||
done
|
||||
jq -n "${jq_args[@]}" "$template"
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ID extractors
|
||||
# Each function calls the relevant list action and prints the first ID.
|
||||
# Prints nothing (empty string) if the list is empty or the call fails.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Extract first docker container ID
|
||||
get_docker_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_docker '{"action":"list"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"docker","subaction":"list"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
@@ -333,10 +367,9 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Extract first docker network ID
|
||||
get_network_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_docker '{"action":"networks"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"docker","subaction":"networks"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
@@ -349,10 +382,9 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Extract first VM ID
|
||||
get_vm_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_vm '{"action":"list"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"vm","subaction":"list"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
@@ -365,10 +397,9 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Extract first API key ID
|
||||
get_key_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_keys '{"action":"list"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"key","subaction":"list"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
@@ -381,10 +412,9 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Extract first disk ID
|
||||
get_disk_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_storage '{"action":"disks"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"disk","subaction":"disks"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
@@ -397,16 +427,14 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Extract first log file path
|
||||
get_log_path() {
|
||||
local raw
|
||||
raw="$(mcporter_call unraid_storage '{"action":"log_files"}' 2>/dev/null)" || return 0
|
||||
raw="$(mcporter_call '{"action":"disk","subaction":"log_files"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
d = json.load(sys.stdin)
|
||||
files = d.get('log_files', [])
|
||||
# Prefer a plain text log (not binary like btmp/lastlog)
|
||||
for f in files:
|
||||
p = f.get('path', '')
|
||||
if p.endswith('.log') or 'syslog' in p or 'messages' in p:
|
||||
@@ -420,35 +448,10 @@ except Exception:
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Grouped test suites
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
suite_unraid_info() {
|
||||
printf '\n%b== unraid_info (19 actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_info: overview" unraid_info '{"action":"overview"}'
|
||||
run_test "unraid_info: array" unraid_info '{"action":"array"}'
|
||||
run_test "unraid_info: network" unraid_info '{"action":"network"}'
|
||||
run_test "unraid_info: registration" unraid_info '{"action":"registration"}'
|
||||
run_test "unraid_info: connect" unraid_info '{"action":"connect"}'
|
||||
run_test "unraid_info: variables" unraid_info '{"action":"variables"}'
|
||||
run_test "unraid_info: metrics" unraid_info '{"action":"metrics"}'
|
||||
run_test "unraid_info: services" unraid_info '{"action":"services"}'
|
||||
run_test "unraid_info: display" unraid_info '{"action":"display"}'
|
||||
run_test "unraid_info: config" unraid_info '{"action":"config"}'
|
||||
run_test "unraid_info: online" unraid_info '{"action":"online"}'
|
||||
run_test "unraid_info: owner" unraid_info '{"action":"owner"}'
|
||||
run_test "unraid_info: settings" unraid_info '{"action":"settings"}'
|
||||
run_test "unraid_info: server" unraid_info '{"action":"server"}'
|
||||
run_test "unraid_info: servers" unraid_info '{"action":"servers"}'
|
||||
run_test "unraid_info: flash" unraid_info '{"action":"flash"}'
|
||||
run_test "unraid_info: ups_devices" unraid_info '{"action":"ups_devices"}'
|
||||
# ups_device and ups_config require a device_id — skip if no UPS devices found
|
||||
local ups_raw
|
||||
ups_raw="$(mcporter_call unraid_info '{"action":"ups_devices"}' 2>/dev/null)" || ups_raw=''
|
||||
local ups_id
|
||||
ups_id="$(printf '%s' "${ups_raw}" | python3 -c "
|
||||
get_ups_id() {
|
||||
local raw
|
||||
raw="$(mcporter_call '{"action":"system","subaction":"ups_devices"}' 2>/dev/null)" || return 0
|
||||
printf '%s' "${raw}" | python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
d = json.load(sys.stdin)
|
||||
@@ -457,153 +460,206 @@ try:
|
||||
print(devs[0].get('id', devs[0].get('name', '')))
|
||||
except Exception:
|
||||
pass
|
||||
" 2>/dev/null)" || ups_id=''
|
||||
" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Grouped test suites
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
suite_system() {
|
||||
printf '\n%b== system (info/metrics/UPS) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "system: overview" '{"action":"system","subaction":"overview"}'
|
||||
run_test "system: array" '{"action":"system","subaction":"array"}'
|
||||
run_test "system: network" '{"action":"system","subaction":"network"}'
|
||||
run_test "system: registration" '{"action":"system","subaction":"registration"}'
|
||||
run_test "system: variables" '{"action":"system","subaction":"variables"}'
|
||||
run_test "system: metrics" '{"action":"system","subaction":"metrics"}'
|
||||
run_test "system: services" '{"action":"system","subaction":"services"}'
|
||||
run_test "system: display" '{"action":"system","subaction":"display"}'
|
||||
run_test "system: config" '{"action":"system","subaction":"config"}'
|
||||
run_test "system: online" '{"action":"system","subaction":"online"}'
|
||||
run_test "system: owner" '{"action":"system","subaction":"owner"}'
|
||||
run_test "system: settings" '{"action":"system","subaction":"settings"}'
|
||||
run_test "system: server" '{"action":"system","subaction":"server"}'
|
||||
run_test "system: servers" '{"action":"system","subaction":"servers"}'
|
||||
run_test "system: flash" '{"action":"system","subaction":"flash"}'
|
||||
run_test "system: ups_devices" '{"action":"system","subaction":"ups_devices"}'
|
||||
|
||||
local ups_id
|
||||
ups_id="$(get_ups_id)" || ups_id=''
|
||||
if [[ -n "${ups_id}" ]]; then
|
||||
run_test "unraid_info: ups_device" unraid_info \
|
||||
"$(printf '{"action":"ups_device","device_id":"%s"}' "${ups_id}")"
|
||||
run_test "unraid_info: ups_config" unraid_info \
|
||||
"$(printf '{"action":"ups_config","device_id":"%s"}' "${ups_id}")"
|
||||
run_test "system: ups_device" \
|
||||
"$(_json_payload '{"action":"system","subaction":"ups_device","device_id":$v}' v="${ups_id}")"
|
||||
run_test "system: ups_config" \
|
||||
"$(_json_payload '{"action":"system","subaction":"ups_config","device_id":$v}' v="${ups_id}")"
|
||||
else
|
||||
skip_test "unraid_info: ups_device" "no UPS devices found"
|
||||
skip_test "unraid_info: ups_config" "no UPS devices found"
|
||||
skip_test "system: ups_device" "no UPS devices found"
|
||||
skip_test "system: ups_config" "no UPS devices found"
|
||||
fi
|
||||
}
|
||||
|
||||
suite_unraid_array() {
|
||||
printf '\n%b== unraid_array (1 read-only action) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
run_test "unraid_array: parity_status" unraid_array '{"action":"parity_status"}'
|
||||
# Destructive actions (parity_start/pause/resume/cancel) skipped
|
||||
suite_array() {
|
||||
printf '\n%b== array (read-only) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
run_test "array: parity_status" '{"action":"array","subaction":"parity_status"}'
|
||||
run_test "array: parity_history" '{"action":"array","subaction":"parity_history"}'
|
||||
# Destructive: parity_start/pause/resume/cancel, start_array, stop_array,
|
||||
# add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats — skipped
|
||||
}
|
||||
|
||||
suite_unraid_storage() {
|
||||
printf '\n%b== unraid_storage (6 actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_disk() {
|
||||
printf '\n%b== disk (storage/shares/logs) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_storage: shares" unraid_storage '{"action":"shares"}'
|
||||
run_test "unraid_storage: disks" unraid_storage '{"action":"disks"}'
|
||||
run_test "unraid_storage: unassigned" unraid_storage '{"action":"unassigned"}'
|
||||
run_test "unraid_storage: log_files" unraid_storage '{"action":"log_files"}'
|
||||
run_test "disk: shares" '{"action":"disk","subaction":"shares"}'
|
||||
run_test "disk: disks" '{"action":"disk","subaction":"disks"}'
|
||||
run_test "disk: log_files" '{"action":"disk","subaction":"log_files"}'
|
||||
|
||||
# disk_details needs a disk ID
|
||||
local disk_id
|
||||
disk_id="$(get_disk_id)" || disk_id=''
|
||||
if [[ -n "${disk_id}" ]]; then
|
||||
run_test "unraid_storage: disk_details" unraid_storage \
|
||||
"$(printf '{"action":"disk_details","disk_id":"%s"}' "${disk_id}")"
|
||||
run_test "disk: disk_details" \
|
||||
"$(_json_payload '{"action":"disk","subaction":"disk_details","disk_id":$v}' v="${disk_id}")"
|
||||
else
|
||||
skip_test "unraid_storage: disk_details" "no disks found"
|
||||
skip_test "disk: disk_details" "no disks found"
|
||||
fi
|
||||
|
||||
# logs needs a valid log path
|
||||
local log_path
|
||||
log_path="$(get_log_path)" || log_path=''
|
||||
if [[ -n "${log_path}" ]]; then
|
||||
run_test "unraid_storage: logs" unraid_storage \
|
||||
"$(printf '{"action":"logs","log_path":"%s","tail_lines":20}' "${log_path}")"
|
||||
run_test "disk: logs" \
|
||||
"$(_json_payload '{"action":"disk","subaction":"logs","log_path":$v,"tail_lines":20}' v="${log_path}")"
|
||||
else
|
||||
skip_test "unraid_storage: logs" "no log files found"
|
||||
skip_test "disk: logs" "no log files found"
|
||||
fi
|
||||
# Destructive: flash_backup — skipped
|
||||
}
|
||||
|
||||
suite_unraid_docker() {
|
||||
printf '\n%b== unraid_docker (7 read-only actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_docker() {
|
||||
printf '\n%b== docker ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_docker: list" unraid_docker '{"action":"list"}'
|
||||
run_test "unraid_docker: networks" unraid_docker '{"action":"networks"}'
|
||||
run_test "unraid_docker: port_conflicts" unraid_docker '{"action":"port_conflicts"}'
|
||||
run_test "unraid_docker: check_updates" unraid_docker '{"action":"check_updates"}'
|
||||
run_test "docker: list" '{"action":"docker","subaction":"list"}'
|
||||
run_test "docker: networks" '{"action":"docker","subaction":"networks"}'
|
||||
|
||||
# details, logs, network_details need IDs
|
||||
local container_id
|
||||
container_id="$(get_docker_id)" || container_id=''
|
||||
if [[ -n "${container_id}" ]]; then
|
||||
run_test "unraid_docker: details" unraid_docker \
|
||||
"$(printf '{"action":"details","container_id":"%s"}' "${container_id}")"
|
||||
run_test "unraid_docker: logs" unraid_docker \
|
||||
"$(printf '{"action":"logs","container_id":"%s","tail_lines":20}' "${container_id}")"
|
||||
run_test "docker: details" \
|
||||
"$(_json_payload '{"action":"docker","subaction":"details","container_id":$v}' v="${container_id}")"
|
||||
else
|
||||
skip_test "unraid_docker: details" "no containers found"
|
||||
skip_test "unraid_docker: logs" "no containers found"
|
||||
skip_test "docker: details" "no containers found"
|
||||
fi
|
||||
|
||||
local network_id
|
||||
network_id="$(get_network_id)" || network_id=''
|
||||
if [[ -n "${network_id}" ]]; then
|
||||
run_test "unraid_docker: network_details" unraid_docker \
|
||||
"$(printf '{"action":"network_details","network_id":"%s"}' "${network_id}")"
|
||||
run_test "docker: network_details" \
|
||||
"$(_json_payload '{"action":"docker","subaction":"network_details","network_id":$v}' v="${network_id}")"
|
||||
else
|
||||
skip_test "unraid_docker: network_details" "no networks found"
|
||||
skip_test "docker: network_details" "no networks found"
|
||||
fi
|
||||
|
||||
# Destructive actions (start/stop/restart/pause/unpause/remove/update/update_all) skipped
|
||||
# Destructive/mutating: start/stop/restart — skipped
|
||||
}
|
||||
|
||||
suite_unraid_vm() {
|
||||
printf '\n%b== unraid_vm (2 read-only actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_vm() {
|
||||
printf '\n%b== vm ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_vm: list" unraid_vm '{"action":"list"}'
|
||||
run_test "vm: list" '{"action":"vm","subaction":"list"}'
|
||||
|
||||
local vm_id
|
||||
vm_id="$(get_vm_id)" || vm_id=''
|
||||
if [[ -n "${vm_id}" ]]; then
|
||||
run_test "unraid_vm: details" unraid_vm \
|
||||
"$(printf '{"action":"details","vm_id":"%s"}' "${vm_id}")"
|
||||
run_test "vm: details" \
|
||||
"$(_json_payload '{"action":"vm","subaction":"details","vm_id":$v}' v="${vm_id}")"
|
||||
else
|
||||
skip_test "unraid_vm: details" "no VMs found (or VM service unavailable)"
|
||||
skip_test "vm: details" "no VMs found (or VM service unavailable)"
|
||||
fi
|
||||
|
||||
# Destructive actions (start/stop/pause/resume/force_stop/reboot/reset) skipped
|
||||
# Destructive: start/stop/pause/resume/force_stop/reboot/reset — skipped
|
||||
}
|
||||
|
||||
suite_unraid_notifications() {
|
||||
printf '\n%b== unraid_notifications (4 read-only actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_notification() {
|
||||
printf '\n%b== notification ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_notifications: overview" unraid_notifications '{"action":"overview"}'
|
||||
run_test "unraid_notifications: list" unraid_notifications '{"action":"list"}'
|
||||
run_test "unraid_notifications: warnings" unraid_notifications '{"action":"warnings"}'
|
||||
run_test "unraid_notifications: unread" unraid_notifications '{"action":"unread"}'
|
||||
|
||||
# Destructive actions (create/archive/delete/delete_archived/archive_all/etc.) skipped
|
||||
run_test "notification: overview" '{"action":"notification","subaction":"overview"}'
|
||||
run_test "notification: list" '{"action":"notification","subaction":"list"}'
|
||||
run_test "notification: recalculate" '{"action":"notification","subaction":"recalculate"}'
|
||||
# Mutating: create/archive/mark_unread/delete/delete_archived/archive_all/etc. — skipped
|
||||
}
|
||||
|
||||
suite_unraid_rclone() {
|
||||
printf '\n%b== unraid_rclone (2 read-only actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_rclone() {
|
||||
printf '\n%b== rclone ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_rclone: list_remotes" unraid_rclone '{"action":"list_remotes"}'
|
||||
# config_form requires a provider_type — use "s3" as a safe, always-available provider
|
||||
run_test "unraid_rclone: config_form" unraid_rclone '{"action":"config_form","provider_type":"s3"}'
|
||||
|
||||
# Destructive actions (create_remote/delete_remote) skipped
|
||||
run_test "rclone: list_remotes" '{"action":"rclone","subaction":"list_remotes"}'
|
||||
run_test "rclone: config_form" '{"action":"rclone","subaction":"config_form","provider_type":"s3"}'
|
||||
# Destructive: create_remote/delete_remote — skipped
|
||||
}
|
||||
|
||||
suite_unraid_users() {
|
||||
printf '\n%b== unraid_users (1 action) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
run_test "unraid_users: me" unraid_users '{"action":"me"}'
|
||||
suite_user() {
|
||||
printf '\n%b== user ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
run_test "user: me" '{"action":"user","subaction":"me"}'
|
||||
}
|
||||
|
||||
suite_unraid_keys() {
|
||||
printf '\n%b== unraid_keys (2 read-only actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_key() {
|
||||
printf '\n%b== key (API keys) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_keys: list" unraid_keys '{"action":"list"}'
|
||||
run_test "key: list" '{"action":"key","subaction":"list"}'
|
||||
|
||||
local key_id
|
||||
key_id="$(get_key_id)" || key_id=''
|
||||
if [[ -n "${key_id}" ]]; then
|
||||
run_test "unraid_keys: get" unraid_keys \
|
||||
"$(printf '{"action":"get","key_id":"%s"}' "${key_id}")"
|
||||
run_test "key: get" \
|
||||
"$(_json_payload '{"action":"key","subaction":"get","key_id":$v}' v="${key_id}")"
|
||||
else
|
||||
skip_test "unraid_keys: get" "no API keys found"
|
||||
skip_test "key: get" "no API keys found"
|
||||
fi
|
||||
|
||||
# Destructive actions (create/update/delete) skipped
|
||||
# Destructive: create/update/delete/add_role/remove_role — skipped
|
||||
}
|
||||
|
||||
suite_unraid_health() {
|
||||
printf '\n%b== unraid_health (3 actions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
suite_health() {
|
||||
printf '\n%b== health ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "unraid_health: check" unraid_health '{"action":"check"}'
|
||||
run_test "unraid_health: test_connection" unraid_health '{"action":"test_connection"}'
|
||||
run_test "unraid_health: diagnose" unraid_health '{"action":"diagnose"}'
|
||||
run_test "health: check" '{"action":"health","subaction":"check"}'
|
||||
run_test "health: test_connection" '{"action":"health","subaction":"test_connection"}'
|
||||
run_test "health: diagnose" '{"action":"health","subaction":"diagnose"}'
|
||||
# setup triggers elicitation — skipped
|
||||
}
|
||||
|
||||
suite_customization() {
|
||||
printf '\n%b== customization ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "customization: theme" '{"action":"customization","subaction":"theme"}'
|
||||
run_test "customization: public_theme" '{"action":"customization","subaction":"public_theme"}'
|
||||
run_test "customization: sso_enabled" '{"action":"customization","subaction":"sso_enabled"}'
|
||||
run_test "customization: is_initial_setup" '{"action":"customization","subaction":"is_initial_setup"}'
|
||||
# Mutating: set_theme — skipped
|
||||
}
|
||||
|
||||
suite_plugin() {
|
||||
printf '\n%b== plugin ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "plugin: list" '{"action":"plugin","subaction":"list"}'
|
||||
# Destructive: add/remove — skipped
|
||||
}
|
||||
|
||||
suite_oidc() {
|
||||
printf '\n%b== oidc ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
|
||||
run_test "oidc: providers" '{"action":"oidc","subaction":"providers"}'
|
||||
run_test "oidc: public_providers" '{"action":"oidc","subaction":"public_providers"}'
|
||||
run_test "oidc: configuration" '{"action":"oidc","subaction":"configuration"}'
|
||||
# provider and validate_session require IDs — skipped
|
||||
}
|
||||
|
||||
suite_live() {
|
||||
printf '\n%b== live (snapshot subscriptions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
|
||||
# Note: these subactions open a transient WebSocket and wait for the first event.
|
||||
# Event-driven actions (parity_progress, ups_status, notifications_overview,
|
||||
# owner, server_status) return status=no_recent_events when no events arrive.
|
||||
run_test "live: cpu" '{"action":"live","subaction":"cpu"}'
|
||||
run_test "live: memory" '{"action":"live","subaction":"memory"}'
|
||||
run_test "live: cpu_telemetry" '{"action":"live","subaction":"cpu_telemetry"}'
|
||||
run_test "live: notifications_overview" '{"action":"live","subaction":"notifications_overview"}'
|
||||
run_test "live: log_tail" '{"action":"live","subaction":"log_tail"}'
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -633,13 +689,9 @@ print_summary() {
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parallel runner — wraps each suite in a background subshell and waits
|
||||
# Parallel runner
|
||||
# ---------------------------------------------------------------------------
|
||||
run_parallel() {
|
||||
# Each suite is independent (only cross-suite dependency: IDs are fetched
|
||||
# fresh inside each suite function, not shared across suites).
|
||||
# Counter updates from subshells won't propagate to the parent — collect
|
||||
# results via temp files instead.
|
||||
log_warn "--parallel mode: per-suite counters aggregated via temp files."
|
||||
|
||||
local tmp_dir
|
||||
@@ -647,23 +699,26 @@ run_parallel() {
|
||||
trap 'rm -rf -- "${tmp_dir}"' RETURN
|
||||
|
||||
local suites=(
|
||||
suite_unraid_info
|
||||
suite_unraid_array
|
||||
suite_unraid_storage
|
||||
suite_unraid_docker
|
||||
suite_unraid_vm
|
||||
suite_unraid_notifications
|
||||
suite_unraid_rclone
|
||||
suite_unraid_users
|
||||
suite_unraid_keys
|
||||
suite_unraid_health
|
||||
suite_system
|
||||
suite_array
|
||||
suite_disk
|
||||
suite_docker
|
||||
suite_vm
|
||||
suite_notification
|
||||
suite_rclone
|
||||
suite_user
|
||||
suite_key
|
||||
suite_health
|
||||
suite_customization
|
||||
suite_plugin
|
||||
suite_oidc
|
||||
suite_live
|
||||
)
|
||||
|
||||
local pids=()
|
||||
local suite
|
||||
for suite in "${suites[@]}"; do
|
||||
(
|
||||
# Reset counters in subshell
|
||||
PASS_COUNT=0; FAIL_COUNT=0; SKIP_COUNT=0; FAIL_NAMES=()
|
||||
"${suite}"
|
||||
printf '%d %d %d\n' "${PASS_COUNT}" "${FAIL_COUNT}" "${SKIP_COUNT}" \
|
||||
@@ -673,13 +728,11 @@ run_parallel() {
|
||||
pids+=($!)
|
||||
done
|
||||
|
||||
# Wait for all background suites
|
||||
local pid
|
||||
for pid in "${pids[@]}"; do
|
||||
wait "${pid}" || true
|
||||
done
|
||||
|
||||
# Aggregate counters
|
||||
local f
|
||||
for f in "${tmp_dir}"/*.counts; do
|
||||
[[ -f "${f}" ]] || continue
|
||||
@@ -702,16 +755,20 @@ run_parallel() {
|
||||
# Sequential runner
|
||||
# ---------------------------------------------------------------------------
|
||||
run_sequential() {
|
||||
suite_unraid_info
|
||||
suite_unraid_array
|
||||
suite_unraid_storage
|
||||
suite_unraid_docker
|
||||
suite_unraid_vm
|
||||
suite_unraid_notifications
|
||||
suite_unraid_rclone
|
||||
suite_unraid_users
|
||||
suite_unraid_keys
|
||||
suite_unraid_health
|
||||
suite_system
|
||||
suite_array
|
||||
suite_disk
|
||||
suite_docker
|
||||
suite_vm
|
||||
suite_notification
|
||||
suite_rclone
|
||||
suite_user
|
||||
suite_key
|
||||
suite_health
|
||||
suite_customization
|
||||
suite_plugin
|
||||
suite_oidc
|
||||
suite_live
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -721,29 +778,21 @@ main() {
|
||||
parse_args "$@"
|
||||
|
||||
printf '%b%s%b\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"
|
||||
printf '%b unraid-mcp integration smoke-test%b\n' "${C_BOLD}" "${C_RESET}"
|
||||
printf '%b unraid-mcp integration smoke-test (single unraid tool)%b\n' "${C_BOLD}" "${C_RESET}"
|
||||
printf '%b Project: %s%b\n' "${C_BOLD}" "${PROJECT_DIR}" "${C_RESET}"
|
||||
printf '%b Timeout: %dms/call | Parallel: %s%b\n' \
|
||||
"${C_BOLD}" "${CALL_TIMEOUT_MS}" "${USE_PARALLEL}" "${C_RESET}"
|
||||
printf '%b Log: %s%b\n' "${C_BOLD}" "${LOG_FILE}" "${C_RESET}"
|
||||
printf '%b%s%b\n\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"
|
||||
|
||||
# Prerequisite gate
|
||||
check_prerequisites || exit 2
|
||||
|
||||
# Server startup gate — fail fast if the Python process can't start
|
||||
smoke_test_server || {
|
||||
log_error ""
|
||||
log_error "Server startup failed. Aborting — no tests will run."
|
||||
log_error ""
|
||||
log_error "To diagnose, run:"
|
||||
log_error " cd ${PROJECT_DIR} && uv run unraid-mcp-server"
|
||||
log_error ""
|
||||
log_error "If server.py has a broken import (e.g. missing tools/settings.py),"
|
||||
log_error "stash or revert the uncommitted server.py change first:"
|
||||
log_error " git stash -- unraid_mcp/server.py"
|
||||
log_error " ./scripts/test-tools.sh"
|
||||
log_error " git stash pop"
|
||||
exit 2
|
||||
}
|
||||
|
||||
|
||||
@@ -6,11 +6,11 @@ Uses Hypothesis to fuzz tool inputs and verify the core invariant:
|
||||
other unhandled exception from arbitrary inputs is a bug.
|
||||
|
||||
Each test class targets a distinct tool domain and strategy profile:
|
||||
- Docker: arbitrary container IDs, action names, numeric params
|
||||
- Docker: arbitrary container IDs, subaction names, numeric params
|
||||
- Notifications: importance strings, list_type strings, field lengths
|
||||
- Keys: arbitrary key IDs, role lists, name strings
|
||||
- VM: arbitrary VM IDs, action names
|
||||
- Info: invalid action names (cross-tool invariant for the action guard)
|
||||
- VM: arbitrary VM IDs, subaction names
|
||||
- Info: invalid subaction names (cross-tool invariant for the subaction guard)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
@@ -60,6 +60,10 @@ def _assert_only_tool_error(exc: BaseException) -> None:
|
||||
)
|
||||
|
||||
|
||||
def _make_tool() -> Any:
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Docker: arbitrary container IDs
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -78,17 +82,15 @@ class TestDockerContainerIdFuzzing:
|
||||
def test_details_arbitrary_container_id(self, container_id: str) -> None:
|
||||
"""Arbitrary container IDs for 'details' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"docker": {"containers": []}}
|
||||
with contextlib.suppress(ToolError):
|
||||
# ToolError is the only acceptable exception — suppress it
|
||||
await tool_fn(action="details", container_id=container_id)
|
||||
await tool_fn(action="docker", subaction="details", container_id=container_id)
|
||||
|
||||
_run(_run_test())
|
||||
|
||||
@@ -97,16 +99,14 @@ class TestDockerContainerIdFuzzing:
|
||||
def test_start_arbitrary_container_id(self, container_id: str) -> None:
|
||||
"""Arbitrary container IDs for 'start' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"docker": {"containers": []}}
|
||||
with contextlib.suppress(ToolError):
|
||||
await tool_fn(action="start", container_id=container_id)
|
||||
await tool_fn(action="docker", subaction="start", container_id=container_id)
|
||||
|
||||
_run(_run_test())
|
||||
|
||||
@@ -115,16 +115,14 @@ class TestDockerContainerIdFuzzing:
|
||||
def test_stop_arbitrary_container_id(self, container_id: str) -> None:
|
||||
"""Arbitrary container IDs for 'stop' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"docker": {"containers": []}}
|
||||
with contextlib.suppress(ToolError):
|
||||
await tool_fn(action="stop", container_id=container_id)
|
||||
await tool_fn(action="docker", subaction="stop", container_id=container_id)
|
||||
|
||||
_run(_run_test())
|
||||
|
||||
@@ -133,81 +131,58 @@ class TestDockerContainerIdFuzzing:
|
||||
def test_restart_arbitrary_container_id(self, container_id: str) -> None:
|
||||
"""Arbitrary container IDs for 'restart' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
# stop then start both need container list + mutation responses
|
||||
mock.return_value = {"docker": {"containers": []}}
|
||||
with contextlib.suppress(ToolError):
|
||||
await tool_fn(action="restart", container_id=container_id)
|
||||
await tool_fn(action="docker", subaction="restart", container_id=container_id)
|
||||
|
||||
_run(_run_test())
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Docker: invalid action names
|
||||
# Docker: invalid subaction names
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDockerInvalidActions:
|
||||
"""Fuzz the action parameter with arbitrary strings.
|
||||
"""Fuzz the subaction parameter with arbitrary strings for the docker domain.
|
||||
|
||||
Invariant: invalid action names raise ToolError, never KeyError or crash.
|
||||
This validates the action guard that sits at the top of every tool function.
|
||||
Invariant: invalid subaction names raise ToolError, never KeyError or crash.
|
||||
This validates the subaction guard that sits inside every domain handler.
|
||||
"""
|
||||
|
||||
@given(st.text())
|
||||
@settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_invalid_action_raises_tool_error(self, action: str) -> None:
|
||||
"""Any non-valid action string must raise ToolError, not crash."""
|
||||
valid_actions = {
|
||||
def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
|
||||
"""Any non-valid subaction string for docker must raise ToolError, not crash."""
|
||||
valid_subactions = {
|
||||
"list",
|
||||
"details",
|
||||
"start",
|
||||
"stop",
|
||||
"restart",
|
||||
"pause",
|
||||
"unpause",
|
||||
"remove",
|
||||
"update",
|
||||
"update_all",
|
||||
"logs",
|
||||
"networks",
|
||||
"network_details",
|
||||
"port_conflicts",
|
||||
"check_updates",
|
||||
"create_folder",
|
||||
"set_folder_children",
|
||||
"delete_entries",
|
||||
"move_to_folder",
|
||||
"move_to_position",
|
||||
"rename_folder",
|
||||
"create_folder_with_items",
|
||||
"update_view_prefs",
|
||||
"sync_templates",
|
||||
"reset_template_mappings",
|
||||
"refresh_digests",
|
||||
}
|
||||
if action in valid_actions:
|
||||
return # Skip valid actions — they have different semantics
|
||||
if subaction in valid_subactions:
|
||||
return # Skip valid subactions — they have different semantics
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock):
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock):
|
||||
try:
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="docker", subaction=subaction)
|
||||
except ToolError:
|
||||
pass # Correct: invalid action raises ToolError
|
||||
pass # Correct: invalid subaction raises ToolError
|
||||
except Exception as exc:
|
||||
# Any other exception is a bug
|
||||
pytest.fail(
|
||||
f"Action '{action!r}' raised {type(exc).__name__} "
|
||||
f"subaction={subaction!r} raised {type(exc).__name__} "
|
||||
f"instead of ToolError: {exc!r}"
|
||||
)
|
||||
|
||||
@@ -234,14 +209,10 @@ class TestNotificationsEnumFuzzing:
|
||||
if importance.upper() in valid_importances:
|
||||
return # Skip valid values
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {
|
||||
@@ -249,7 +220,8 @@ class TestNotificationsEnumFuzzing:
|
||||
}
|
||||
try:
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test",
|
||||
subject="Sub",
|
||||
description="Desc",
|
||||
@@ -270,19 +242,15 @@ class TestNotificationsEnumFuzzing:
|
||||
if list_type.upper() in valid_list_types:
|
||||
return # Skip valid values
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {"notifications": {"list": []}}
|
||||
try:
|
||||
await tool_fn(action="list", list_type=list_type)
|
||||
await tool_fn(action="notification", subaction="list", list_type=list_type)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -305,14 +273,10 @@ class TestNotificationsEnumFuzzing:
|
||||
must raise ToolError for oversized values, never truncate silently or crash.
|
||||
"""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {
|
||||
@@ -320,7 +284,8 @@ class TestNotificationsEnumFuzzing:
|
||||
}
|
||||
try:
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title=title,
|
||||
subject=subject,
|
||||
description=description,
|
||||
@@ -343,20 +308,17 @@ class TestNotificationsEnumFuzzing:
|
||||
if notif_type.upper() in valid_types:
|
||||
return
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {"deleteNotification": {}}
|
||||
try:
|
||||
await tool_fn(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="some-id",
|
||||
notification_type=notif_type,
|
||||
confirm=True,
|
||||
@@ -372,44 +334,38 @@ class TestNotificationsEnumFuzzing:
|
||||
|
||||
@given(st.text())
|
||||
@settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_invalid_action_raises_tool_error(self, action: str) -> None:
|
||||
"""Invalid action names for notifications tool raise ToolError."""
|
||||
valid_actions = {
|
||||
def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
|
||||
"""Invalid subaction names for notifications domain raise ToolError."""
|
||||
valid_subactions = {
|
||||
"overview",
|
||||
"list",
|
||||
"warnings",
|
||||
"create",
|
||||
"archive",
|
||||
"unread",
|
||||
"mark_unread",
|
||||
"delete",
|
||||
"delete_archived",
|
||||
"archive_all",
|
||||
"archive_many",
|
||||
"create_unique",
|
||||
"unarchive_many",
|
||||
"unarchive_all",
|
||||
"recalculate",
|
||||
}
|
||||
if action in valid_actions:
|
||||
if subaction in valid_subactions:
|
||||
return
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
):
|
||||
try:
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="notification", subaction=subaction)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
pytest.fail(
|
||||
f"Action {action!r} raised {type(exc).__name__} "
|
||||
f"subaction={subaction!r} raised {type(exc).__name__} "
|
||||
f"instead of ToolError: {exc!r}"
|
||||
)
|
||||
|
||||
@@ -425,7 +381,7 @@ class TestKeysInputFuzzing:
|
||||
"""Fuzz API key management parameters.
|
||||
|
||||
Invariant: arbitrary key_id strings, names, and role lists never crash
|
||||
the keys tool — only ToolError or clean return values are acceptable.
|
||||
the keys domain — only ToolError or clean return values are acceptable.
|
||||
"""
|
||||
|
||||
@given(st.text())
|
||||
@@ -433,14 +389,14 @@ class TestKeysInputFuzzing:
|
||||
def test_get_arbitrary_key_id(self, key_id: str) -> None:
|
||||
"""Arbitrary key_id for 'get' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"apiKey": None}
|
||||
try:
|
||||
await tool_fn(action="get", key_id=key_id)
|
||||
await tool_fn(action="key", subaction="get", key_id=key_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -453,16 +409,16 @@ class TestKeysInputFuzzing:
|
||||
def test_create_arbitrary_key_name(self, name: str) -> None:
|
||||
"""Arbitrary name strings for 'create' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {
|
||||
"apiKey": {"create": {"id": "1", "name": name, "key": "k", "roles": []}}
|
||||
}
|
||||
try:
|
||||
await tool_fn(action="create", name=name)
|
||||
await tool_fn(action="key", subaction="create", name=name)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -475,14 +431,16 @@ class TestKeysInputFuzzing:
|
||||
def test_add_role_arbitrary_roles(self, roles: list[str]) -> None:
|
||||
"""Arbitrary role lists for 'add_role' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"apiKey": {"addRole": True}}
|
||||
try:
|
||||
await tool_fn(action="add_role", key_id="some-key-id", roles=roles)
|
||||
await tool_fn(
|
||||
action="key", subaction="add_role", key_id="some-key-id", roles=roles
|
||||
)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -492,22 +450,22 @@ class TestKeysInputFuzzing:
|
||||
|
||||
@given(st.text())
|
||||
@settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_invalid_action_raises_tool_error(self, action: str) -> None:
|
||||
"""Invalid action names for keys tool raise ToolError."""
|
||||
valid_actions = {"list", "get", "create", "update", "delete", "add_role", "remove_role"}
|
||||
if action in valid_actions:
|
||||
def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
|
||||
"""Invalid subaction names for keys domain raise ToolError."""
|
||||
valid_subactions = {"list", "get", "create", "update", "delete", "add_role", "remove_role"}
|
||||
if subaction in valid_subactions:
|
||||
return
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock):
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock):
|
||||
try:
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="key", subaction=subaction)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
pytest.fail(
|
||||
f"Action {action!r} raised {type(exc).__name__} "
|
||||
f"subaction={subaction!r} raised {type(exc).__name__} "
|
||||
f"instead of ToolError: {exc!r}"
|
||||
)
|
||||
|
||||
@@ -515,15 +473,15 @@ class TestKeysInputFuzzing:
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# VM: arbitrary VM IDs and action names
|
||||
# VM: arbitrary VM IDs and subaction names
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestVMInputFuzzing:
|
||||
"""Fuzz VM management parameters.
|
||||
|
||||
Invariant: arbitrary vm_id strings and action names must never crash
|
||||
the VM tool — only ToolError or clean return values are acceptable.
|
||||
Invariant: arbitrary vm_id strings and subaction names must never crash
|
||||
the VM domain — only ToolError or clean return values are acceptable.
|
||||
"""
|
||||
|
||||
@given(st.text())
|
||||
@@ -531,17 +489,15 @@ class TestVMInputFuzzing:
|
||||
def test_start_arbitrary_vm_id(self, vm_id: str) -> None:
|
||||
"""Arbitrary vm_id for 'start' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {"vm": {"start": True}}
|
||||
try:
|
||||
await tool_fn(action="start", vm_id=vm_id)
|
||||
await tool_fn(action="vm", subaction="start", vm_id=vm_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -554,17 +510,15 @@ class TestVMInputFuzzing:
|
||||
def test_stop_arbitrary_vm_id(self, vm_id: str) -> None:
|
||||
"""Arbitrary vm_id for 'stop' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {"vm": {"stop": True}}
|
||||
try:
|
||||
await tool_fn(action="stop", vm_id=vm_id)
|
||||
await tool_fn(action="vm", subaction="stop", vm_id=vm_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -577,18 +531,16 @@ class TestVMInputFuzzing:
|
||||
def test_details_arbitrary_vm_id(self, vm_id: str) -> None:
|
||||
"""Arbitrary vm_id for 'details' must not crash the tool."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
# Return an empty VM list so the lookup gracefully fails
|
||||
mock.return_value = {"vms": {"domains": []}}
|
||||
try:
|
||||
await tool_fn(action="details", vm_id=vm_id)
|
||||
await tool_fn(action="vm", subaction="details", vm_id=vm_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -598,9 +550,9 @@ class TestVMInputFuzzing:
|
||||
|
||||
@given(st.text())
|
||||
@settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_invalid_action_raises_tool_error(self, action: str) -> None:
|
||||
"""Invalid action names for VM tool raise ToolError."""
|
||||
valid_actions = {
|
||||
def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
|
||||
"""Invalid subaction names for VM domain raise ToolError."""
|
||||
valid_subactions = {
|
||||
"list",
|
||||
"details",
|
||||
"start",
|
||||
@@ -611,24 +563,22 @@ class TestVMInputFuzzing:
|
||||
"reboot",
|
||||
"reset",
|
||||
}
|
||||
if action in valid_actions:
|
||||
if subaction in valid_subactions:
|
||||
return
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
):
|
||||
try:
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="vm", subaction=subaction)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
pytest.fail(
|
||||
f"Action {action!r} raised {type(exc).__name__} "
|
||||
f"subaction={subaction!r} raised {type(exc).__name__} "
|
||||
f"instead of ToolError: {exc!r}"
|
||||
)
|
||||
|
||||
@@ -664,18 +614,16 @@ class TestBoundaryValues:
|
||||
)
|
||||
@settings(max_examples=50, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_docker_details_adversarial_inputs(self, container_id: str) -> None:
|
||||
"""Adversarial container_id values must not crash the Docker tool."""
|
||||
"""Adversarial container_id values must not crash the Docker domain."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"docker": {"containers": []}}
|
||||
try:
|
||||
await tool_fn(action="details", container_id=container_id)
|
||||
await tool_fn(action="docker", subaction="details", container_id=container_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -701,14 +649,10 @@ class TestBoundaryValues:
|
||||
def test_notifications_importance_adversarial(self, importance: str) -> None:
|
||||
"""Adversarial importance values must raise ToolError, not crash."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
)
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
mock.return_value = {
|
||||
@@ -716,7 +660,8 @@ class TestBoundaryValues:
|
||||
}
|
||||
try:
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="t",
|
||||
subject="s",
|
||||
description="d",
|
||||
@@ -742,14 +687,14 @@ class TestBoundaryValues:
|
||||
def test_keys_get_adversarial_key_ids(self, key_id: str) -> None:
|
||||
"""Adversarial key_id values must not crash the keys get action."""
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch(
|
||||
"unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock
|
||||
"unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
mock.return_value = {"apiKey": None}
|
||||
try:
|
||||
await tool_fn(action="get", key_id=key_id)
|
||||
await tool_fn(action="key", subaction="get", key_id=key_id)
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
@@ -759,49 +704,46 @@ class TestBoundaryValues:
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Info: action guard (invalid actions on a read-only tool)
|
||||
# Top-level action guard (invalid domain names)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestInfoActionGuard:
|
||||
"""Fuzz the action parameter on unraid_info.
|
||||
"""Fuzz the top-level action parameter (domain selector).
|
||||
|
||||
Invariant: the info tool exposes no mutations and its action guard must
|
||||
reject any invalid action with a ToolError rather than a KeyError crash.
|
||||
Invariant: the consolidated unraid tool must reject any invalid domain
|
||||
with a ToolError rather than a KeyError crash.
|
||||
"""
|
||||
|
||||
@given(st.text())
|
||||
@settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
|
||||
def test_invalid_action_raises_tool_error(self, action: str) -> None:
|
||||
"""Invalid action names for the info tool raise ToolError."""
|
||||
"""Invalid domain names raise ToolError."""
|
||||
valid_actions = {
|
||||
"overview",
|
||||
"array",
|
||||
"network",
|
||||
"registration",
|
||||
"variables",
|
||||
"metrics",
|
||||
"services",
|
||||
"display",
|
||||
"config",
|
||||
"online",
|
||||
"owner",
|
||||
"settings",
|
||||
"server",
|
||||
"servers",
|
||||
"flash",
|
||||
"ups_devices",
|
||||
"ups_device",
|
||||
"ups_config",
|
||||
"customization",
|
||||
"disk",
|
||||
"docker",
|
||||
"health",
|
||||
"key",
|
||||
"live",
|
||||
"notification",
|
||||
"oidc",
|
||||
"plugin",
|
||||
"rclone",
|
||||
"setting",
|
||||
"system",
|
||||
"user",
|
||||
"vm",
|
||||
}
|
||||
if action in valid_actions:
|
||||
return
|
||||
|
||||
async def _run_test():
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
with patch("unraid_mcp.tools.info.make_graphql_request", new_callable=AsyncMock):
|
||||
async def _run_test() -> None:
|
||||
tool_fn = _make_tool()
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock):
|
||||
try:
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action=action, subaction="list")
|
||||
except ToolError:
|
||||
pass
|
||||
except Exception as exc:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""Safety audit tests for destructive action confirmation guards.
|
||||
|
||||
Verifies that all destructive operations across every tool require
|
||||
Verifies that all destructive operations across every domain require
|
||||
explicit `confirm=True` before execution, and that the DESTRUCTIVE_ACTIONS
|
||||
registries are complete and consistent.
|
||||
"""
|
||||
@@ -9,97 +9,75 @@ from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
# conftest.py is the shared test-helper module for this project.
|
||||
# pytest automatically adds tests/ to sys.path, making it importable here
|
||||
# without a package __init__.py. Do NOT add tests/__init__.py — it breaks
|
||||
# conftest.py's fixture auto-discovery.
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
# Import DESTRUCTIVE_ACTIONS sets from every tool module that defines one
|
||||
from unraid_mcp.tools.array import DESTRUCTIVE_ACTIONS as ARRAY_DESTRUCTIVE
|
||||
from unraid_mcp.tools.array import MUTATIONS as ARRAY_MUTATIONS
|
||||
from unraid_mcp.tools.keys import DESTRUCTIVE_ACTIONS as KEYS_DESTRUCTIVE
|
||||
from unraid_mcp.tools.keys import MUTATIONS as KEYS_MUTATIONS
|
||||
from unraid_mcp.tools.notifications import DESTRUCTIVE_ACTIONS as NOTIF_DESTRUCTIVE
|
||||
from unraid_mcp.tools.notifications import MUTATIONS as NOTIF_MUTATIONS
|
||||
from unraid_mcp.tools.plugins import DESTRUCTIVE_ACTIONS as PLUGINS_DESTRUCTIVE
|
||||
from unraid_mcp.tools.plugins import MUTATIONS as PLUGINS_MUTATIONS
|
||||
from unraid_mcp.tools.rclone import DESTRUCTIVE_ACTIONS as RCLONE_DESTRUCTIVE
|
||||
from unraid_mcp.tools.rclone import MUTATIONS as RCLONE_MUTATIONS
|
||||
from unraid_mcp.tools.settings import DESTRUCTIVE_ACTIONS as SETTINGS_DESTRUCTIVE
|
||||
from unraid_mcp.tools.settings import MUTATIONS as SETTINGS_MUTATIONS
|
||||
from unraid_mcp.tools.storage import DESTRUCTIVE_ACTIONS as STORAGE_DESTRUCTIVE
|
||||
from unraid_mcp.tools.storage import MUTATIONS as STORAGE_MUTATIONS
|
||||
from unraid_mcp.tools.virtualization import DESTRUCTIVE_ACTIONS as VM_DESTRUCTIVE
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS as VM_MUTATIONS
|
||||
# Import DESTRUCTIVE_ACTIONS and MUTATIONS sets from the consolidated unraid module
|
||||
from unraid_mcp.tools.unraid import (
|
||||
_ARRAY_DESTRUCTIVE,
|
||||
_ARRAY_MUTATIONS,
|
||||
_DISK_DESTRUCTIVE,
|
||||
_DISK_MUTATIONS,
|
||||
_KEY_DESTRUCTIVE,
|
||||
_KEY_MUTATIONS,
|
||||
_NOTIFICATION_DESTRUCTIVE,
|
||||
_NOTIFICATION_MUTATIONS,
|
||||
_PLUGIN_DESTRUCTIVE,
|
||||
_PLUGIN_MUTATIONS,
|
||||
_RCLONE_DESTRUCTIVE,
|
||||
_RCLONE_MUTATIONS,
|
||||
_SETTING_DESTRUCTIVE,
|
||||
_SETTING_MUTATIONS,
|
||||
_VM_DESTRUCTIVE,
|
||||
_VM_MUTATIONS,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Known destructive actions registry (ground truth for this audit)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Every destructive action in the codebase, keyed by (tool_module, tool_name)
|
||||
KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str] | str]] = {
|
||||
KNOWN_DESTRUCTIVE: dict[str, dict] = {
|
||||
"array": {
|
||||
"module": "unraid_mcp.tools.array",
|
||||
"register_fn": "register_array_tool",
|
||||
"tool_name": "unraid_array",
|
||||
"actions": {"remove_disk", "clear_disk_stats", "stop_array"},
|
||||
"runtime_set": ARRAY_DESTRUCTIVE,
|
||||
"runtime_set": _ARRAY_DESTRUCTIVE,
|
||||
"mutations": _ARRAY_MUTATIONS,
|
||||
},
|
||||
"vm": {
|
||||
"module": "unraid_mcp.tools.virtualization",
|
||||
"register_fn": "register_vm_tool",
|
||||
"tool_name": "unraid_vm",
|
||||
"actions": {"force_stop", "reset"},
|
||||
"runtime_set": VM_DESTRUCTIVE,
|
||||
"runtime_set": _VM_DESTRUCTIVE,
|
||||
"mutations": _VM_MUTATIONS,
|
||||
},
|
||||
"notifications": {
|
||||
"module": "unraid_mcp.tools.notifications",
|
||||
"register_fn": "register_notifications_tool",
|
||||
"tool_name": "unraid_notifications",
|
||||
"notification": {
|
||||
"actions": {"delete", "delete_archived"},
|
||||
"runtime_set": NOTIF_DESTRUCTIVE,
|
||||
"runtime_set": _NOTIFICATION_DESTRUCTIVE,
|
||||
"mutations": _NOTIFICATION_MUTATIONS,
|
||||
},
|
||||
"rclone": {
|
||||
"module": "unraid_mcp.tools.rclone",
|
||||
"register_fn": "register_rclone_tool",
|
||||
"tool_name": "unraid_rclone",
|
||||
"actions": {"delete_remote"},
|
||||
"runtime_set": RCLONE_DESTRUCTIVE,
|
||||
"runtime_set": _RCLONE_DESTRUCTIVE,
|
||||
"mutations": _RCLONE_MUTATIONS,
|
||||
},
|
||||
"keys": {
|
||||
"module": "unraid_mcp.tools.keys",
|
||||
"register_fn": "register_keys_tool",
|
||||
"tool_name": "unraid_keys",
|
||||
"key": {
|
||||
"actions": {"delete"},
|
||||
"runtime_set": KEYS_DESTRUCTIVE,
|
||||
"runtime_set": _KEY_DESTRUCTIVE,
|
||||
"mutations": _KEY_MUTATIONS,
|
||||
},
|
||||
"storage": {
|
||||
"module": "unraid_mcp.tools.storage",
|
||||
"register_fn": "register_storage_tool",
|
||||
"tool_name": "unraid_storage",
|
||||
"disk": {
|
||||
"actions": {"flash_backup"},
|
||||
"runtime_set": STORAGE_DESTRUCTIVE,
|
||||
"runtime_set": _DISK_DESTRUCTIVE,
|
||||
"mutations": _DISK_MUTATIONS,
|
||||
},
|
||||
"settings": {
|
||||
"module": "unraid_mcp.tools.settings",
|
||||
"register_fn": "register_settings_tool",
|
||||
"tool_name": "unraid_settings",
|
||||
"actions": {
|
||||
"configure_ups",
|
||||
"setting": {
|
||||
"actions": {"configure_ups"},
|
||||
"runtime_set": _SETTING_DESTRUCTIVE,
|
||||
"mutations": _SETTING_MUTATIONS,
|
||||
},
|
||||
"runtime_set": SETTINGS_DESTRUCTIVE,
|
||||
},
|
||||
"plugins": {
|
||||
"module": "unraid_mcp.tools.plugins",
|
||||
"register_fn": "register_plugins_tool",
|
||||
"tool_name": "unraid_plugins",
|
||||
"plugin": {
|
||||
"actions": {"remove"},
|
||||
"runtime_set": PLUGINS_DESTRUCTIVE,
|
||||
"runtime_set": _PLUGIN_DESTRUCTIVE,
|
||||
"mutations": _PLUGIN_MUTATIONS,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -112,90 +90,53 @@ KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str] | str]] = {
|
||||
class TestDestructiveActionRegistries:
|
||||
"""Verify that DESTRUCTIVE_ACTIONS sets in source code match the audit."""
|
||||
|
||||
@pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_set_matches_audit(self, tool_key: str) -> None:
|
||||
"""Each tool's DESTRUCTIVE_ACTIONS must exactly match the audited set."""
|
||||
info = KNOWN_DESTRUCTIVE[tool_key]
|
||||
@pytest.mark.parametrize("domain", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_set_matches_audit(self, domain: str) -> None:
|
||||
info = KNOWN_DESTRUCTIVE[domain]
|
||||
assert info["runtime_set"] == info["actions"], (
|
||||
f"{tool_key}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, expected {info['actions']}"
|
||||
f"{domain}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, expected {info['actions']}"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_actions_are_valid_mutations(self, tool_key: str) -> None:
|
||||
"""Every destructive action must correspond to an actual mutation."""
|
||||
info = KNOWN_DESTRUCTIVE[tool_key]
|
||||
mutations_map = {
|
||||
"array": ARRAY_MUTATIONS,
|
||||
"vm": VM_MUTATIONS,
|
||||
"notifications": NOTIF_MUTATIONS,
|
||||
"rclone": RCLONE_MUTATIONS,
|
||||
"keys": KEYS_MUTATIONS,
|
||||
"storage": STORAGE_MUTATIONS,
|
||||
"settings": SETTINGS_MUTATIONS,
|
||||
"plugins": PLUGINS_MUTATIONS,
|
||||
}
|
||||
mutations = mutations_map[tool_key]
|
||||
@pytest.mark.parametrize("domain", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_actions_are_valid_mutations(self, domain: str) -> None:
|
||||
info = KNOWN_DESTRUCTIVE[domain]
|
||||
for action in info["actions"]:
|
||||
assert action in mutations, (
|
||||
f"{tool_key}: destructive action '{action}' is not in MUTATIONS"
|
||||
assert action in info["mutations"], (
|
||||
f"{domain}: destructive action '{action}' is not in MUTATIONS"
|
||||
)
|
||||
|
||||
def test_no_delete_or_remove_mutations_missing_from_destructive(self) -> None:
|
||||
"""Any mutation with 'delete' or 'remove' in its name should be destructive.
|
||||
|
||||
Exceptions (documented, intentional):
|
||||
keys/remove_role — fully reversible; the role can always be re-added via add_role.
|
||||
No data is lost and there is no irreversible side-effect.
|
||||
key/remove_role — fully reversible; the role can always be re-added via add_role.
|
||||
"""
|
||||
# Mutations explicitly exempted from the delete/remove heuristic with justification.
|
||||
# Add entries here only when the action is demonstrably reversible and non-destructive.
|
||||
_HEURISTIC_EXCEPTIONS: frozenset[str] = frozenset(
|
||||
{
|
||||
"keys/remove_role", # reversible — role can be re-added via add_role
|
||||
"key/remove_role", # reversible — role can be re-added via add_role
|
||||
}
|
||||
)
|
||||
|
||||
all_mutations = {
|
||||
"array": ARRAY_MUTATIONS,
|
||||
"vm": VM_MUTATIONS,
|
||||
"notifications": NOTIF_MUTATIONS,
|
||||
"rclone": RCLONE_MUTATIONS,
|
||||
"keys": KEYS_MUTATIONS,
|
||||
"storage": STORAGE_MUTATIONS,
|
||||
"settings": SETTINGS_MUTATIONS,
|
||||
"plugins": PLUGINS_MUTATIONS,
|
||||
}
|
||||
all_destructive = {
|
||||
"array": ARRAY_DESTRUCTIVE,
|
||||
"vm": VM_DESTRUCTIVE,
|
||||
"notifications": NOTIF_DESTRUCTIVE,
|
||||
"rclone": RCLONE_DESTRUCTIVE,
|
||||
"keys": KEYS_DESTRUCTIVE,
|
||||
"storage": STORAGE_DESTRUCTIVE,
|
||||
"settings": SETTINGS_DESTRUCTIVE,
|
||||
"plugins": PLUGINS_DESTRUCTIVE,
|
||||
}
|
||||
missing: list[str] = []
|
||||
for tool_key, mutations in all_mutations.items():
|
||||
destructive = all_destructive[tool_key]
|
||||
missing.extend(
|
||||
f"{tool_key}/{action_name}"
|
||||
for action_name in mutations
|
||||
if ("delete" in action_name or "remove" in action_name)
|
||||
for domain, info in KNOWN_DESTRUCTIVE.items():
|
||||
destructive = info["runtime_set"]
|
||||
for action_name in info["mutations"]:
|
||||
if (
|
||||
("delete" in action_name or "remove" in action_name)
|
||||
and action_name not in destructive
|
||||
and f"{tool_key}/{action_name}" not in _HEURISTIC_EXCEPTIONS
|
||||
)
|
||||
and f"{domain}/{action_name}" not in _HEURISTIC_EXCEPTIONS
|
||||
):
|
||||
missing.append(f"{domain}/{action_name}")
|
||||
assert not missing, (
|
||||
f"Mutations with 'delete'/'remove' not in DESTRUCTIVE_ACTIONS: {missing}"
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Confirmation guard tests: calling without confirm=True raises ToolError
|
||||
# Confirmation guard tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Build parametrized test cases: (tool_key, action, kwargs_without_confirm)
|
||||
# Each destructive action needs the minimum required params (minus confirm)
|
||||
# (action, subaction, extra_kwargs)
|
||||
_DESTRUCTIVE_TEST_CASES: list[tuple[str, str, dict]] = [
|
||||
# Array
|
||||
("array", "remove_disk", {"disk_id": "abc123:local"}),
|
||||
@@ -205,161 +146,112 @@ _DESTRUCTIVE_TEST_CASES: list[tuple[str, str, dict]] = [
|
||||
("vm", "force_stop", {"vm_id": "test-vm-uuid"}),
|
||||
("vm", "reset", {"vm_id": "test-vm-uuid"}),
|
||||
# Notifications
|
||||
("notifications", "delete", {"notification_id": "notif-1", "notification_type": "UNREAD"}),
|
||||
("notifications", "delete_archived", {}),
|
||||
("notification", "delete", {"notification_id": "notif-1", "notification_type": "UNREAD"}),
|
||||
("notification", "delete_archived", {}),
|
||||
# RClone
|
||||
("rclone", "delete_remote", {"name": "my-remote"}),
|
||||
# Keys
|
||||
("keys", "delete", {"key_id": "key-123"}),
|
||||
# Storage
|
||||
("key", "delete", {"key_id": "key-123"}),
|
||||
# Disk (flash_backup)
|
||||
(
|
||||
"storage",
|
||||
"disk",
|
||||
"flash_backup",
|
||||
{"remote_name": "r", "source_path": "/boot", "destination_path": "r:b"},
|
||||
),
|
||||
# Settings
|
||||
("settings", "configure_ups", {"ups_config": {"mode": "slave"}}),
|
||||
("setting", "configure_ups", {"ups_config": {"mode": "slave"}}),
|
||||
# Plugins
|
||||
("plugins", "remove", {"names": ["my-plugin"]}),
|
||||
("plugin", "remove", {"names": ["my-plugin"]}),
|
||||
]
|
||||
|
||||
|
||||
_CASE_IDS = [f"{c[0]}/{c[1]}" for c in _DESTRUCTIVE_TEST_CASES]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_array_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.array.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
_MODULE = "unraid_mcp.tools.unraid"
|
||||
_REGISTER_FN = "register_unraid_tool"
|
||||
_TOOL_NAME = "unraid"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_vm_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(f"{_MODULE}.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_notif_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_rclone_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.rclone.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_keys_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_storage_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_settings_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.settings.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_plugins_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.plugins.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
# Map tool_key -> (module path, register fn, tool name)
|
||||
_TOOL_REGISTRY = {
|
||||
"array": ("unraid_mcp.tools.array", "register_array_tool", "unraid_array"),
|
||||
"vm": ("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"),
|
||||
"notifications": (
|
||||
"unraid_mcp.tools.notifications",
|
||||
"register_notifications_tool",
|
||||
"unraid_notifications",
|
||||
),
|
||||
"rclone": ("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone"),
|
||||
"keys": ("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys"),
|
||||
"storage": ("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage"),
|
||||
"settings": ("unraid_mcp.tools.settings", "register_settings_tool", "unraid_settings"),
|
||||
"plugins": ("unraid_mcp.tools.plugins", "register_plugins_tool", "unraid_plugins"),
|
||||
}
|
||||
|
||||
|
||||
class TestConfirmationGuards:
|
||||
"""Every destructive action must reject calls without confirm=True."""
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_rejects_without_confirm(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""Calling a destructive action without confirm=True must raise ToolError."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action=action, **kwargs)
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_rejects_with_confirm_false(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""Explicitly passing confirm=False must still raise ToolError."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action=action, confirm=False, **kwargs)
|
||||
await tool_fn(action=action, subaction=subaction, confirm=False, **kwargs)
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_error_message_includes_action_name(
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_error_message_includes_subaction_name(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""The error message should include the action name for clarity."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match=subaction):
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
|
||||
with pytest.raises(ToolError, match=action):
|
||||
await tool_fn(action=action, **kwargs)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Strict guard tests: no network calls escape when unconfirmed
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNoGraphQLCallsWhenUnconfirmed:
|
||||
"""The most critical safety property: when confirm is missing/False,
|
||||
NO GraphQL request must ever reach the network layer.
|
||||
"""
|
||||
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_without_confirm(
|
||||
self,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_with_confirm_false(
|
||||
self,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, subaction=subaction, confirm=False, **kwargs)
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -370,30 +262,29 @@ class TestConfirmationGuards:
|
||||
class TestConfirmAllowsExecution:
|
||||
"""Destructive actions with confirm=True should reach the GraphQL layer."""
|
||||
|
||||
async def test_vm_force_stop_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None:
|
||||
_mock_vm_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
result = await tool_fn(action="force_stop", vm_id="test-uuid", confirm=True)
|
||||
async def test_vm_force_stop_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="vm", subaction="force_stop", vm_id="test-uuid", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_vm_reset_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None:
|
||||
_mock_vm_graphql.return_value = {"vm": {"reset": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
result = await tool_fn(action="reset", vm_id="test-uuid", confirm=True)
|
||||
async def test_vm_reset_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"reset": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="vm", subaction="reset", vm_id="test-uuid", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_notifications_delete_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None:
|
||||
_mock_notif_graphql.return_value = {
|
||||
async def test_notifications_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"deleteNotification": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
)
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="notif-1",
|
||||
notification_type="UNREAD",
|
||||
confirm=True,
|
||||
@@ -401,43 +292,38 @@ class TestConfirmAllowsExecution:
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_notifications_delete_archived_with_confirm(
|
||||
self, _mock_notif_graphql: AsyncMock
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_notif_graphql.return_value = {
|
||||
_mock_graphql.return_value = {
|
||||
"deleteArchivedNotifications": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
)
|
||||
result = await tool_fn(action="delete_archived", confirm=True)
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="notification", subaction="delete_archived", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_rclone_delete_remote_with_confirm(self, _mock_rclone_graphql: AsyncMock) -> None:
|
||||
_mock_rclone_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")
|
||||
result = await tool_fn(action="delete_remote", name="my-remote", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_keys_delete_with_confirm(self, _mock_keys_graphql: AsyncMock) -> None:
|
||||
_mock_keys_graphql.return_value = {"apiKey": {"delete": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
result = await tool_fn(action="delete", key_id="key-123", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_storage_flash_backup_with_confirm(
|
||||
self, _mock_storage_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_storage_graphql.return_value = {
|
||||
"initiateFlashBackup": {"status": "started", "jobId": "j:1"}
|
||||
}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage"
|
||||
)
|
||||
async def test_rclone_delete_remote_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="flash_backup",
|
||||
action="rclone", subaction="delete_remote", name="my-remote", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_keys_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"delete": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="key", subaction="delete", key_id="key-123", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_disk_flash_backup_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:1"}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
confirm=True,
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
@@ -445,125 +331,46 @@ class TestConfirmAllowsExecution:
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_settings_configure_ups_with_confirm(
|
||||
self, _mock_settings_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_settings_graphql.return_value = {"configureUps": True}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.settings", "register_settings_tool", "unraid_settings"
|
||||
)
|
||||
async def test_settings_configure_ups_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"configureUps": True}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="configure_ups", confirm=True, ups_config={"mode": "master", "cable": "usb"}
|
||||
action="setting",
|
||||
subaction="configure_ups",
|
||||
confirm=True,
|
||||
ups_config={"mode": "master", "cable": "usb"},
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_remove_disk_with_confirm(self, _mock_array_graphql: AsyncMock) -> None:
|
||||
_mock_array_graphql.return_value = {"array": {"removeDiskFromArray": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
result = await tool_fn(action="remove_disk", disk_id="abc:local", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_clear_disk_stats_with_confirm(
|
||||
self, _mock_array_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_array_graphql.return_value = {"array": {"clearArrayDiskStatistics": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
result = await tool_fn(action="clear_disk_stats", disk_id="abc:local", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_stop_array_with_confirm(self, _mock_array_graphql: AsyncMock) -> None:
|
||||
_mock_array_graphql.return_value = {"array": {"setState": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
result = await tool_fn(action="stop_array", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_plugins_remove_with_confirm(self, _mock_plugins_graphql: AsyncMock) -> None:
|
||||
_mock_plugins_graphql.return_value = {"removePlugin": True}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.plugins", "register_plugins_tool", "unraid_plugins"
|
||||
async def test_array_remove_disk_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"removeDiskFromArray": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="array", subaction="remove_disk", disk_id="abc:local", confirm=True
|
||||
)
|
||||
result = await tool_fn(action="remove", names=["my-plugin"], confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_clear_disk_stats_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"clearArrayDiskStatistics": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Strict guard tests: no network calls escape when unconfirmed
|
||||
# ---------------------------------------------------------------------------
|
||||
async def test_array_stop_array_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="array", subaction="stop_array", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestNoGraphQLCallsWhenUnconfirmed:
|
||||
"""The most critical safety property: when confirm is missing/False,
|
||||
NO GraphQL request must ever reach the network layer. This verifies that
|
||||
the guard fires before any I/O, not just that a ToolError is raised.
|
||||
"""
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_without_confirm(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
kwargs: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""make_graphql_request must NOT be called when confirm is absent."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
mock_map = {
|
||||
"array": _mock_array_graphql,
|
||||
"vm": _mock_vm_graphql,
|
||||
"notifications": _mock_notif_graphql,
|
||||
"rclone": _mock_rclone_graphql,
|
||||
"keys": _mock_keys_graphql,
|
||||
"storage": _mock_storage_graphql,
|
||||
"settings": _mock_settings_graphql,
|
||||
"plugins": _mock_plugins_graphql,
|
||||
}
|
||||
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, **kwargs)
|
||||
|
||||
mock_map[tool_key].assert_not_called()
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_with_confirm_false(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
kwargs: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""make_graphql_request must NOT be called when confirm=False."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
mock_map = {
|
||||
"array": _mock_array_graphql,
|
||||
"vm": _mock_vm_graphql,
|
||||
"notifications": _mock_notif_graphql,
|
||||
"rclone": _mock_rclone_graphql,
|
||||
"keys": _mock_keys_graphql,
|
||||
"storage": _mock_storage_graphql,
|
||||
"settings": _mock_settings_graphql,
|
||||
"plugins": _mock_plugins_graphql,
|
||||
}
|
||||
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, confirm=False, **kwargs)
|
||||
|
||||
mock_map[tool_key].assert_not_called()
|
||||
async def test_plugins_remove_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"removePlugin": True}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="plugin", subaction="remove", names=["my-plugin"], confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -572,57 +379,35 @@ class TestNoGraphQLCallsWhenUnconfirmed:
|
||||
|
||||
|
||||
class TestNonDestructiveActionsNeverRequireConfirm:
|
||||
"""Guard regression test: non-destructive mutations must work without confirm.
|
||||
|
||||
If a non-destructive action starts requiring confirm=True (over-guarding),
|
||||
it would break normal use cases. This test class prevents that regression.
|
||||
"""
|
||||
"""Guard regression: non-destructive ops must work without confirm."""
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"tool_key,action,kwargs,mock_return",
|
||||
"action,subaction,kwargs,mock_return",
|
||||
[
|
||||
("array", "parity_cancel", {}, {"parityCheck": {"cancel": True}}),
|
||||
("vm", "start", {"vm_id": "test-uuid"}, {"vm": {"start": True}}),
|
||||
("notifications", "archive_all", {}, {"archiveAll": {"info": 0, "total": 0}}),
|
||||
("notification", "archive_all", {}, {"archiveAll": {"info": 0, "total": 0}}),
|
||||
("rclone", "list_remotes", {}, {"rclone": {"remotes": []}}),
|
||||
("keys", "list", {}, {"apiKeys": []}),
|
||||
("key", "list", {}, {"apiKeys": []}),
|
||||
],
|
||||
ids=[
|
||||
"array/parity_cancel",
|
||||
"vm/start",
|
||||
"notifications/archive_all",
|
||||
"notification/archive_all",
|
||||
"rclone/list_remotes",
|
||||
"keys/list",
|
||||
"key/list",
|
||||
],
|
||||
)
|
||||
async def test_non_destructive_action_works_without_confirm(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
mock_return: dict,
|
||||
_mock_array_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_storage_graphql: AsyncMock,
|
||||
_mock_settings_graphql: AsyncMock,
|
||||
_mock_plugins_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""Non-destructive actions must not raise ToolError for missing confirm."""
|
||||
mock_map = {
|
||||
"array": _mock_array_graphql,
|
||||
"vm": _mock_vm_graphql,
|
||||
"notifications": _mock_notif_graphql,
|
||||
"rclone": _mock_rclone_graphql,
|
||||
"keys": _mock_keys_graphql,
|
||||
}
|
||||
mock_map[tool_key].return_value = mock_return
|
||||
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
# Just verify no ToolError is raised for missing confirm — return shape varies by action
|
||||
result = await tool_fn(action=action, **kwargs)
|
||||
_mock_graphql.return_value = mock_return
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
assert result is not None
|
||||
mock_map[tool_key].assert_called_once()
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
@@ -28,6 +28,46 @@ def _validate_operation(schema: GraphQLSchema, query_str: str) -> list[str]:
|
||||
return [str(e) for e in errors]
|
||||
|
||||
|
||||
def _all_domain_dicts(unraid_mod: object) -> list[tuple[str, dict[str, str]]]:
|
||||
"""Return all query/mutation dicts from the consolidated unraid module.
|
||||
|
||||
Single source of truth used by both test_all_tool_queries_validate and
|
||||
test_total_operations_count so the two lists stay in sync automatically.
|
||||
"""
|
||||
import types
|
||||
|
||||
m = unraid_mod
|
||||
if not isinstance(m, types.ModuleType):
|
||||
import importlib
|
||||
|
||||
m = importlib.import_module("unraid_mcp.tools.unraid")
|
||||
|
||||
return [
|
||||
("system/QUERIES", m._SYSTEM_QUERIES),
|
||||
("array/QUERIES", m._ARRAY_QUERIES),
|
||||
("array/MUTATIONS", m._ARRAY_MUTATIONS),
|
||||
("disk/QUERIES", m._DISK_QUERIES),
|
||||
("disk/MUTATIONS", m._DISK_MUTATIONS),
|
||||
("docker/QUERIES", m._DOCKER_QUERIES),
|
||||
("docker/MUTATIONS", m._DOCKER_MUTATIONS),
|
||||
("vm/QUERIES", m._VM_QUERIES),
|
||||
("vm/MUTATIONS", m._VM_MUTATIONS),
|
||||
("notification/QUERIES", m._NOTIFICATION_QUERIES),
|
||||
("notification/MUTATIONS", m._NOTIFICATION_MUTATIONS),
|
||||
("rclone/QUERIES", m._RCLONE_QUERIES),
|
||||
("rclone/MUTATIONS", m._RCLONE_MUTATIONS),
|
||||
("user/QUERIES", m._USER_QUERIES),
|
||||
("key/QUERIES", m._KEY_QUERIES),
|
||||
("key/MUTATIONS", m._KEY_MUTATIONS),
|
||||
("setting/MUTATIONS", m._SETTING_MUTATIONS),
|
||||
("customization/QUERIES", m._CUSTOMIZATION_QUERIES),
|
||||
("customization/MUTATIONS", m._CUSTOMIZATION_MUTATIONS),
|
||||
("plugin/QUERIES", m._PLUGIN_QUERIES),
|
||||
("plugin/MUTATIONS", m._PLUGIN_MUTATIONS),
|
||||
("oidc/QUERIES", m._OIDC_QUERIES),
|
||||
]
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Info Tool (19 queries)
|
||||
# ============================================================================
|
||||
@@ -35,116 +75,116 @@ class TestInfoQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/info.py."""
|
||||
|
||||
def test_overview_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["overview"])
|
||||
assert not errors, f"overview query validation failed: {errors}"
|
||||
|
||||
def test_array_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["array"])
|
||||
assert not errors, f"array query validation failed: {errors}"
|
||||
|
||||
def test_network_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["network"])
|
||||
assert not errors, f"network query validation failed: {errors}"
|
||||
|
||||
def test_registration_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["registration"])
|
||||
assert not errors, f"registration query validation failed: {errors}"
|
||||
|
||||
def test_variables_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["variables"])
|
||||
assert not errors, f"variables query validation failed: {errors}"
|
||||
|
||||
def test_metrics_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["metrics"])
|
||||
assert not errors, f"metrics query validation failed: {errors}"
|
||||
|
||||
def test_services_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["services"])
|
||||
assert not errors, f"services query validation failed: {errors}"
|
||||
|
||||
def test_display_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["display"])
|
||||
assert not errors, f"display query validation failed: {errors}"
|
||||
|
||||
def test_config_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["config"])
|
||||
assert not errors, f"config query validation failed: {errors}"
|
||||
|
||||
def test_online_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["online"])
|
||||
assert not errors, f"online query validation failed: {errors}"
|
||||
|
||||
def test_owner_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["owner"])
|
||||
assert not errors, f"owner query validation failed: {errors}"
|
||||
|
||||
def test_settings_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["settings"])
|
||||
assert not errors, f"settings query validation failed: {errors}"
|
||||
|
||||
def test_server_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["server"])
|
||||
assert not errors, f"server query validation failed: {errors}"
|
||||
|
||||
def test_servers_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["servers"])
|
||||
assert not errors, f"servers query validation failed: {errors}"
|
||||
|
||||
def test_flash_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["flash"])
|
||||
assert not errors, f"flash query validation failed: {errors}"
|
||||
|
||||
def test_ups_devices_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["ups_devices"])
|
||||
assert not errors, f"ups_devices query validation failed: {errors}"
|
||||
|
||||
def test_ups_device_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["ups_device"])
|
||||
assert not errors, f"ups_device query validation failed: {errors}"
|
||||
|
||||
def test_ups_config_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["ups_config"])
|
||||
assert not errors, f"ups_config query validation failed: {errors}"
|
||||
|
||||
def test_all_info_actions_covered(self, schema: GraphQLSchema) -> None:
|
||||
"""Ensure every key in QUERIES has a corresponding test."""
|
||||
from unraid_mcp.tools.info import QUERIES
|
||||
from unraid_mcp.tools.unraid import _SYSTEM_QUERIES as QUERIES
|
||||
|
||||
expected_actions = {
|
||||
"overview",
|
||||
@@ -165,7 +205,6 @@ class TestInfoQueries:
|
||||
"ups_devices",
|
||||
"ups_device",
|
||||
"ups_config",
|
||||
"connect",
|
||||
}
|
||||
assert set(QUERIES.keys()) == expected_actions
|
||||
|
||||
@@ -177,19 +216,19 @@ class TestArrayQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/array.py."""
|
||||
|
||||
def test_parity_status_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import QUERIES
|
||||
from unraid_mcp.tools.unraid import _ARRAY_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["parity_status"])
|
||||
assert not errors, f"parity_status query validation failed: {errors}"
|
||||
|
||||
def test_parity_history_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import QUERIES
|
||||
from unraid_mcp.tools.unraid import _ARRAY_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["parity_history"])
|
||||
assert not errors, f"parity_history query validation failed: {errors}"
|
||||
|
||||
def test_all_array_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import QUERIES
|
||||
from unraid_mcp.tools.unraid import _ARRAY_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"parity_status", "parity_history"}
|
||||
|
||||
@@ -198,73 +237,73 @@ class TestArrayMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/array.py."""
|
||||
|
||||
def test_parity_start_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["parity_start"])
|
||||
assert not errors, f"parity_start mutation validation failed: {errors}"
|
||||
|
||||
def test_parity_pause_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["parity_pause"])
|
||||
assert not errors, f"parity_pause mutation validation failed: {errors}"
|
||||
|
||||
def test_parity_resume_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["parity_resume"])
|
||||
assert not errors, f"parity_resume mutation validation failed: {errors}"
|
||||
|
||||
def test_parity_cancel_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["parity_cancel"])
|
||||
assert not errors, f"parity_cancel mutation validation failed: {errors}"
|
||||
|
||||
def test_start_array_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["start_array"])
|
||||
assert not errors, f"start_array mutation validation failed: {errors}"
|
||||
|
||||
def test_stop_array_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["stop_array"])
|
||||
assert not errors, f"stop_array mutation validation failed: {errors}"
|
||||
|
||||
def test_add_disk_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["add_disk"])
|
||||
assert not errors, f"add_disk mutation validation failed: {errors}"
|
||||
|
||||
def test_remove_disk_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["remove_disk"])
|
||||
assert not errors, f"remove_disk mutation validation failed: {errors}"
|
||||
|
||||
def test_mount_disk_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["mount_disk"])
|
||||
assert not errors, f"mount_disk mutation validation failed: {errors}"
|
||||
|
||||
def test_unmount_disk_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["unmount_disk"])
|
||||
assert not errors, f"unmount_disk mutation validation failed: {errors}"
|
||||
|
||||
def test_clear_disk_stats_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["clear_disk_stats"])
|
||||
assert not errors, f"clear_disk_stats mutation validation failed: {errors}"
|
||||
|
||||
def test_all_array_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.array import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _ARRAY_MUTATIONS as MUTATIONS
|
||||
|
||||
expected = {
|
||||
"parity_start",
|
||||
@@ -289,37 +328,37 @@ class TestStorageQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/storage.py."""
|
||||
|
||||
def test_shares_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["shares"])
|
||||
assert not errors, f"shares query validation failed: {errors}"
|
||||
|
||||
def test_disks_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["disks"])
|
||||
assert not errors, f"disks query validation failed: {errors}"
|
||||
|
||||
def test_disk_details_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["disk_details"])
|
||||
assert not errors, f"disk_details query validation failed: {errors}"
|
||||
|
||||
def test_log_files_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["log_files"])
|
||||
assert not errors, f"log_files query validation failed: {errors}"
|
||||
|
||||
def test_logs_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["logs"])
|
||||
assert not errors, f"logs query validation failed: {errors}"
|
||||
|
||||
def test_all_storage_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DISK_QUERIES as QUERIES
|
||||
|
||||
expected = {"shares", "disks", "disk_details", "log_files", "logs"}
|
||||
assert set(QUERIES.keys()) == expected
|
||||
@@ -329,13 +368,13 @@ class TestStorageMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/storage.py."""
|
||||
|
||||
def test_flash_backup_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _DISK_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["flash_backup"])
|
||||
assert not errors, f"flash_backup mutation validation failed: {errors}"
|
||||
|
||||
def test_all_storage_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.storage import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _DISK_MUTATIONS as MUTATIONS
|
||||
|
||||
assert set(MUTATIONS.keys()) == {"flash_backup"}
|
||||
|
||||
@@ -347,31 +386,31 @@ class TestDockerQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/docker.py."""
|
||||
|
||||
def test_list_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DOCKER_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list"])
|
||||
assert not errors, f"list query validation failed: {errors}"
|
||||
|
||||
def test_details_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DOCKER_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["details"])
|
||||
assert not errors, f"details query validation failed: {errors}"
|
||||
|
||||
def test_networks_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DOCKER_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["networks"])
|
||||
assert not errors, f"networks query validation failed: {errors}"
|
||||
|
||||
def test_network_details_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DOCKER_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["network_details"])
|
||||
assert not errors, f"network_details query validation failed: {errors}"
|
||||
|
||||
def test_all_docker_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import QUERIES
|
||||
from unraid_mcp.tools.unraid import _DOCKER_QUERIES as QUERIES
|
||||
|
||||
expected = {
|
||||
"list",
|
||||
@@ -386,19 +425,19 @@ class TestDockerMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/docker.py."""
|
||||
|
||||
def test_start_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _DOCKER_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["start"])
|
||||
assert not errors, f"start mutation validation failed: {errors}"
|
||||
|
||||
def test_stop_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _DOCKER_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["stop"])
|
||||
assert not errors, f"stop mutation validation failed: {errors}"
|
||||
|
||||
def test_all_docker_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.docker import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _DOCKER_MUTATIONS as MUTATIONS
|
||||
|
||||
expected = {
|
||||
"start",
|
||||
@@ -414,19 +453,19 @@ class TestVmQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/virtualization.py."""
|
||||
|
||||
def test_list_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import QUERIES
|
||||
from unraid_mcp.tools.unraid import _VM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list"])
|
||||
assert not errors, f"list query validation failed: {errors}"
|
||||
|
||||
def test_details_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import QUERIES
|
||||
from unraid_mcp.tools.unraid import _VM_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["details"])
|
||||
assert not errors, f"details query validation failed: {errors}"
|
||||
|
||||
def test_all_vm_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import QUERIES
|
||||
from unraid_mcp.tools.unraid import _VM_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"list", "details"}
|
||||
|
||||
@@ -435,49 +474,49 @@ class TestVmMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/virtualization.py."""
|
||||
|
||||
def test_start_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["start"])
|
||||
assert not errors, f"start mutation validation failed: {errors}"
|
||||
|
||||
def test_stop_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["stop"])
|
||||
assert not errors, f"stop mutation validation failed: {errors}"
|
||||
|
||||
def test_pause_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["pause"])
|
||||
assert not errors, f"pause mutation validation failed: {errors}"
|
||||
|
||||
def test_resume_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["resume"])
|
||||
assert not errors, f"resume mutation validation failed: {errors}"
|
||||
|
||||
def test_force_stop_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["force_stop"])
|
||||
assert not errors, f"force_stop mutation validation failed: {errors}"
|
||||
|
||||
def test_reboot_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["reboot"])
|
||||
assert not errors, f"reboot mutation validation failed: {errors}"
|
||||
|
||||
def test_reset_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["reset"])
|
||||
assert not errors, f"reset mutation validation failed: {errors}"
|
||||
|
||||
def test_all_vm_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _VM_MUTATIONS as MUTATIONS
|
||||
|
||||
expected = {"start", "stop", "pause", "resume", "force_stop", "reboot", "reset"}
|
||||
assert set(MUTATIONS.keys()) == expected
|
||||
@@ -490,19 +529,19 @@ class TestNotificationQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/notifications.py."""
|
||||
|
||||
def test_overview_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import QUERIES
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["overview"])
|
||||
assert not errors, f"overview query validation failed: {errors}"
|
||||
|
||||
def test_list_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import QUERIES
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list"])
|
||||
assert not errors, f"list query validation failed: {errors}"
|
||||
|
||||
def test_all_notification_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import QUERIES
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"overview", "list"}
|
||||
|
||||
@@ -511,72 +550,72 @@ class TestNotificationMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/notifications.py."""
|
||||
|
||||
def test_create_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["create"])
|
||||
assert not errors, f"create mutation validation failed: {errors}"
|
||||
|
||||
def test_archive_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["archive"])
|
||||
assert not errors, f"archive mutation validation failed: {errors}"
|
||||
|
||||
def test_unread_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
def test_mark_unread_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["unread"])
|
||||
assert not errors, f"unread mutation validation failed: {errors}"
|
||||
errors = _validate_operation(schema, MUTATIONS["mark_unread"])
|
||||
assert not errors, f"mark_unread mutation validation failed: {errors}"
|
||||
|
||||
def test_delete_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["delete"])
|
||||
assert not errors, f"delete mutation validation failed: {errors}"
|
||||
|
||||
def test_delete_archived_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["delete_archived"])
|
||||
assert not errors, f"delete_archived mutation validation failed: {errors}"
|
||||
|
||||
def test_archive_all_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["archive_all"])
|
||||
assert not errors, f"archive_all mutation validation failed: {errors}"
|
||||
|
||||
def test_archive_many_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["archive_many"])
|
||||
assert not errors, f"archive_many mutation validation failed: {errors}"
|
||||
|
||||
def test_unarchive_many_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["unarchive_many"])
|
||||
assert not errors, f"unarchive_many mutation validation failed: {errors}"
|
||||
|
||||
def test_unarchive_all_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["unarchive_all"])
|
||||
assert not errors, f"unarchive_all mutation validation failed: {errors}"
|
||||
|
||||
def test_recalculate_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["recalculate"])
|
||||
assert not errors, f"recalculate mutation validation failed: {errors}"
|
||||
|
||||
def test_all_notification_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.notifications import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _NOTIFICATION_MUTATIONS as MUTATIONS
|
||||
|
||||
expected = {
|
||||
"create",
|
||||
"archive",
|
||||
"unread",
|
||||
"mark_unread",
|
||||
"delete",
|
||||
"delete_archived",
|
||||
"archive_all",
|
||||
@@ -595,19 +634,19 @@ class TestRcloneQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/rclone.py."""
|
||||
|
||||
def test_list_remotes_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import QUERIES
|
||||
from unraid_mcp.tools.unraid import _RCLONE_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list_remotes"])
|
||||
assert not errors, f"list_remotes query validation failed: {errors}"
|
||||
|
||||
def test_config_form_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import QUERIES
|
||||
from unraid_mcp.tools.unraid import _RCLONE_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["config_form"])
|
||||
assert not errors, f"config_form query validation failed: {errors}"
|
||||
|
||||
def test_all_rclone_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import QUERIES
|
||||
from unraid_mcp.tools.unraid import _RCLONE_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"list_remotes", "config_form"}
|
||||
|
||||
@@ -616,19 +655,19 @@ class TestRcloneMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/rclone.py."""
|
||||
|
||||
def test_create_remote_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _RCLONE_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["create_remote"])
|
||||
assert not errors, f"create_remote mutation validation failed: {errors}"
|
||||
|
||||
def test_delete_remote_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _RCLONE_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["delete_remote"])
|
||||
assert not errors, f"delete_remote mutation validation failed: {errors}"
|
||||
|
||||
def test_all_rclone_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.rclone import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _RCLONE_MUTATIONS as MUTATIONS
|
||||
|
||||
assert set(MUTATIONS.keys()) == {"create_remote", "delete_remote"}
|
||||
|
||||
@@ -640,13 +679,13 @@ class TestUsersQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/users.py."""
|
||||
|
||||
def test_me_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.users import QUERIES
|
||||
from unraid_mcp.tools.unraid import _USER_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["me"])
|
||||
assert not errors, f"me query validation failed: {errors}"
|
||||
|
||||
def test_all_users_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.users import QUERIES
|
||||
from unraid_mcp.tools.unraid import _USER_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"me"}
|
||||
|
||||
@@ -658,19 +697,19 @@ class TestKeysQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/keys.py."""
|
||||
|
||||
def test_list_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import QUERIES
|
||||
from unraid_mcp.tools.unraid import _KEY_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list"])
|
||||
assert not errors, f"list query validation failed: {errors}"
|
||||
|
||||
def test_get_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import QUERIES
|
||||
from unraid_mcp.tools.unraid import _KEY_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["get"])
|
||||
assert not errors, f"get query validation failed: {errors}"
|
||||
|
||||
def test_all_keys_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import QUERIES
|
||||
from unraid_mcp.tools.unraid import _KEY_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"list", "get"}
|
||||
|
||||
@@ -679,37 +718,37 @@ class TestKeysMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/keys.py."""
|
||||
|
||||
def test_create_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["create"])
|
||||
assert not errors, f"create mutation validation failed: {errors}"
|
||||
|
||||
def test_update_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["update"])
|
||||
assert not errors, f"update mutation validation failed: {errors}"
|
||||
|
||||
def test_delete_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["delete"])
|
||||
assert not errors, f"delete mutation validation failed: {errors}"
|
||||
|
||||
def test_add_role_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["add_role"])
|
||||
assert not errors, f"add_role mutation validation failed: {errors}"
|
||||
|
||||
def test_remove_role_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["remove_role"])
|
||||
assert not errors, f"remove_role mutation validation failed: {errors}"
|
||||
|
||||
def test_all_keys_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.keys import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _KEY_MUTATIONS as MUTATIONS
|
||||
|
||||
assert set(MUTATIONS.keys()) == {"create", "update", "delete", "add_role", "remove_role"}
|
||||
|
||||
@@ -721,19 +760,19 @@ class TestSettingsMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/settings.py."""
|
||||
|
||||
def test_update_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.settings import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _SETTING_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["update"])
|
||||
assert not errors, f"update mutation validation failed: {errors}"
|
||||
|
||||
def test_configure_ups_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.settings import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _SETTING_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["configure_ups"])
|
||||
assert not errors, f"configure_ups mutation validation failed: {errors}"
|
||||
|
||||
def test_all_settings_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.settings import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _SETTING_MUTATIONS as MUTATIONS
|
||||
|
||||
expected = {
|
||||
"update",
|
||||
@@ -790,7 +829,7 @@ class TestCustomizationQueries:
|
||||
assert not errors, f"is_initial_setup (isFreshInstall) query validation failed: {errors}"
|
||||
|
||||
def test_sso_enabled_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.customization import QUERIES
|
||||
from unraid_mcp.tools.unraid import _CUSTOMIZATION_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["sso_enabled"])
|
||||
assert not errors, f"sso_enabled query validation failed: {errors}"
|
||||
@@ -805,13 +844,13 @@ class TestCustomizationMutations:
|
||||
"""Validate mutations from unraid_mcp/tools/customization.py."""
|
||||
|
||||
def test_set_theme_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.customization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _CUSTOMIZATION_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["set_theme"])
|
||||
assert not errors, f"set_theme mutation validation failed: {errors}"
|
||||
|
||||
def test_all_customization_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.customization import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _CUSTOMIZATION_MUTATIONS as MUTATIONS
|
||||
|
||||
assert set(MUTATIONS.keys()) == {"set_theme"}
|
||||
|
||||
@@ -823,13 +862,13 @@ class TestPluginsQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/plugins.py."""
|
||||
|
||||
def test_list_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.plugins import QUERIES
|
||||
from unraid_mcp.tools.unraid import _PLUGIN_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["list"])
|
||||
assert not errors, f"plugins list query validation failed: {errors}"
|
||||
|
||||
def test_all_plugins_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.plugins import QUERIES
|
||||
from unraid_mcp.tools.unraid import _PLUGIN_QUERIES as QUERIES
|
||||
|
||||
assert set(QUERIES.keys()) == {"list"}
|
||||
|
||||
@@ -838,19 +877,19 @@ class TestPluginsMutations:
|
||||
"""Validate all mutations from unraid_mcp/tools/plugins.py."""
|
||||
|
||||
def test_add_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.plugins import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _PLUGIN_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["add"])
|
||||
assert not errors, f"plugins add mutation validation failed: {errors}"
|
||||
|
||||
def test_remove_mutation(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.plugins import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _PLUGIN_MUTATIONS as MUTATIONS
|
||||
|
||||
errors = _validate_operation(schema, MUTATIONS["remove"])
|
||||
assert not errors, f"plugins remove mutation validation failed: {errors}"
|
||||
|
||||
def test_all_plugins_mutations_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.plugins import MUTATIONS
|
||||
from unraid_mcp.tools.unraid import _PLUGIN_MUTATIONS as MUTATIONS
|
||||
|
||||
assert set(MUTATIONS.keys()) == {"add", "remove"}
|
||||
|
||||
@@ -862,37 +901,37 @@ class TestOidcQueries:
|
||||
"""Validate all queries from unraid_mcp/tools/oidc.py."""
|
||||
|
||||
def test_providers_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["providers"])
|
||||
assert not errors, f"oidc providers query validation failed: {errors}"
|
||||
|
||||
def test_provider_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["provider"])
|
||||
assert not errors, f"oidc provider query validation failed: {errors}"
|
||||
|
||||
def test_configuration_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["configuration"])
|
||||
assert not errors, f"oidc configuration query validation failed: {errors}"
|
||||
|
||||
def test_public_providers_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["public_providers"])
|
||||
assert not errors, f"oidc public_providers query validation failed: {errors}"
|
||||
|
||||
def test_validate_session_query(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
errors = _validate_operation(schema, QUERIES["validate_session"])
|
||||
assert not errors, f"oidc validate_session query validation failed: {errors}"
|
||||
|
||||
def test_all_oidc_queries_covered(self, schema: GraphQLSchema) -> None:
|
||||
from unraid_mcp.tools.oidc import QUERIES
|
||||
from unraid_mcp.tools.unraid import _OIDC_QUERIES as QUERIES
|
||||
|
||||
expected = {
|
||||
"providers",
|
||||
@@ -911,36 +950,20 @@ class TestSchemaCompleteness:
|
||||
"""Validate that all tool operations are covered by the schema."""
|
||||
|
||||
def test_all_tool_queries_validate(self, schema: GraphQLSchema) -> None:
|
||||
"""Bulk-validate every query across all tools.
|
||||
"""Bulk-validate every query/mutation across all domains in the consolidated unraid module.
|
||||
|
||||
Known schema mismatches are tracked in KNOWN_SCHEMA_ISSUES and excluded
|
||||
from the assertion so the test suite stays green while the underlying
|
||||
tool queries are fixed incrementally.
|
||||
"""
|
||||
import importlib
|
||||
import unraid_mcp.tools.unraid as unraid_mod
|
||||
|
||||
tool_modules = [
|
||||
"unraid_mcp.tools.info",
|
||||
"unraid_mcp.tools.array",
|
||||
"unraid_mcp.tools.storage",
|
||||
"unraid_mcp.tools.docker",
|
||||
"unraid_mcp.tools.virtualization",
|
||||
"unraid_mcp.tools.notifications",
|
||||
"unraid_mcp.tools.rclone",
|
||||
"unraid_mcp.tools.users",
|
||||
"unraid_mcp.tools.keys",
|
||||
"unraid_mcp.tools.settings",
|
||||
"unraid_mcp.tools.customization",
|
||||
"unraid_mcp.tools.plugins",
|
||||
"unraid_mcp.tools.oidc",
|
||||
]
|
||||
# All query/mutation dicts in the consolidated module, keyed by domain/type label
|
||||
all_operation_dicts = _all_domain_dicts(unraid_mod)
|
||||
|
||||
# Known schema mismatches in tool QUERIES/MUTATIONS dicts.
|
||||
# These represent bugs in the tool implementation, not in the tests.
|
||||
# Remove entries from this set as they are fixed.
|
||||
# Known schema mismatches — bugs in tool implementation, not in tests.
|
||||
# Remove entries as they are fixed.
|
||||
KNOWN_SCHEMA_ISSUES: set[str] = {
|
||||
# storage: unassignedDevices not in Query type
|
||||
"storage/QUERIES/unassigned",
|
||||
# customization: Customization.theme field does not exist
|
||||
"customization/QUERIES/theme",
|
||||
# customization: publicPartnerInfo not in Query type
|
||||
@@ -953,26 +976,10 @@ class TestSchemaCompleteness:
|
||||
unexpected_passes: list[str] = []
|
||||
total = 0
|
||||
|
||||
for module_path in tool_modules:
|
||||
mod = importlib.import_module(module_path)
|
||||
tool_name = module_path.split(".")[-1]
|
||||
|
||||
queries = getattr(mod, "QUERIES", {})
|
||||
for action, query_str in queries.items():
|
||||
for label, ops_dict in all_operation_dicts:
|
||||
for action, query_str in ops_dict.items():
|
||||
total += 1
|
||||
key = f"{tool_name}/QUERIES/{action}"
|
||||
errors = _validate_operation(schema, query_str)
|
||||
if errors:
|
||||
if key not in KNOWN_SCHEMA_ISSUES:
|
||||
failures.append(f"{key}: {errors[0]}")
|
||||
else:
|
||||
if key in KNOWN_SCHEMA_ISSUES:
|
||||
unexpected_passes.append(key)
|
||||
|
||||
mutations = getattr(mod, "MUTATIONS", {})
|
||||
for action, query_str in mutations.items():
|
||||
total += 1
|
||||
key = f"{tool_name}/MUTATIONS/{action}"
|
||||
key = f"{label}/{action}"
|
||||
errors = _validate_operation(schema, query_str)
|
||||
if errors:
|
||||
if key not in KNOWN_SCHEMA_ISSUES:
|
||||
@@ -982,7 +989,6 @@ class TestSchemaCompleteness:
|
||||
unexpected_passes.append(key)
|
||||
|
||||
if unexpected_passes:
|
||||
# A known issue was fixed — remove it from KNOWN_SCHEMA_ISSUES
|
||||
raise AssertionError(
|
||||
"The following operations are listed in KNOWN_SCHEMA_ISSUES but now pass — "
|
||||
"remove them from the set:\n" + "\n".join(unexpected_passes)
|
||||
@@ -1003,29 +1009,9 @@ class TestSchemaCompleteness:
|
||||
|
||||
def test_total_operations_count(self, schema: GraphQLSchema) -> None:
|
||||
"""Verify the expected number of tool operations exist."""
|
||||
import importlib
|
||||
import unraid_mcp.tools.unraid as unraid_mod
|
||||
|
||||
tool_modules = [
|
||||
"unraid_mcp.tools.info",
|
||||
"unraid_mcp.tools.array",
|
||||
"unraid_mcp.tools.storage",
|
||||
"unraid_mcp.tools.docker",
|
||||
"unraid_mcp.tools.virtualization",
|
||||
"unraid_mcp.tools.notifications",
|
||||
"unraid_mcp.tools.rclone",
|
||||
"unraid_mcp.tools.users",
|
||||
"unraid_mcp.tools.keys",
|
||||
"unraid_mcp.tools.settings",
|
||||
"unraid_mcp.tools.customization",
|
||||
"unraid_mcp.tools.plugins",
|
||||
"unraid_mcp.tools.oidc",
|
||||
]
|
||||
all_dicts = [d for _, d in _all_domain_dicts(unraid_mod)]
|
||||
|
||||
total = 0
|
||||
for module_path in tool_modules:
|
||||
mod = importlib.import_module(module_path)
|
||||
total += len(getattr(mod, "QUERIES", {}))
|
||||
total += len(getattr(mod, "MUTATIONS", {}))
|
||||
|
||||
# Operations across all tools (queries + mutations in dicts)
|
||||
assert total >= 50, f"Expected at least 50 operations, found {total}"
|
||||
total = sum(len(d) for d in all_dicts)
|
||||
assert total >= 90, f"Expected at least 90 operations, found {total}"
|
||||
|
||||
155
tests/test_api_key_auth.py
Normal file
155
tests/test_api_key_auth.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""Tests for ApiKeyVerifier and _build_auth() in server.py."""
|
||||
|
||||
import importlib
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
import unraid_mcp.server as srv
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ApiKeyVerifier unit tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_api_key_verifier_accepts_correct_key():
|
||||
"""Returns AccessToken when the presented token matches the configured key."""
|
||||
verifier = srv.ApiKeyVerifier("secret-key-abc123")
|
||||
result = await verifier.verify_token("secret-key-abc123")
|
||||
|
||||
assert result is not None
|
||||
assert result.client_id == "api-key-client"
|
||||
assert result.token == "secret-key-abc123"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_api_key_verifier_rejects_wrong_key():
|
||||
"""Returns None when the token does not match."""
|
||||
verifier = srv.ApiKeyVerifier("secret-key-abc123")
|
||||
result = await verifier.verify_token("wrong-key")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_api_key_verifier_rejects_empty_token():
|
||||
"""Returns None for an empty string token."""
|
||||
verifier = srv.ApiKeyVerifier("secret-key-abc123")
|
||||
result = await verifier.verify_token("")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_api_key_verifier_empty_key_rejects_empty_token():
|
||||
"""When initialised with empty key, even an empty token is rejected.
|
||||
|
||||
An empty UNRAID_MCP_API_KEY means auth is disabled — ApiKeyVerifier
|
||||
should not be instantiated in that case. But if it is, it must not
|
||||
grant access via an empty bearer token.
|
||||
"""
|
||||
verifier = srv.ApiKeyVerifier("")
|
||||
result = await verifier.verify_token("")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _build_auth() integration tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_build_auth_returns_none_when_nothing_configured(monkeypatch):
|
||||
"""Returns None when neither Google OAuth nor API key is set."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_API_KEY", "")
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
result = srv._build_auth()
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_build_auth_returns_api_key_verifier_when_only_api_key_set(monkeypatch):
|
||||
"""Returns ApiKeyVerifier when UNRAID_MCP_API_KEY is set but Google OAuth is not."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_API_KEY", "my-secret-api-key")
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
result = srv._build_auth()
|
||||
assert isinstance(result, srv.ApiKeyVerifier)
|
||||
|
||||
|
||||
def test_build_auth_returns_google_provider_when_only_oauth_set(monkeypatch):
|
||||
"""Returns GoogleProvider when Google OAuth vars are set but no API key."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
monkeypatch.setenv("UNRAID_MCP_API_KEY", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_JWT_SIGNING_KEY", "x" * 32)
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
mock_provider = MagicMock()
|
||||
with patch("unraid_mcp.server.GoogleProvider", return_value=mock_provider):
|
||||
result = srv._build_auth()
|
||||
|
||||
assert result is mock_provider
|
||||
|
||||
|
||||
def test_build_auth_returns_multi_auth_when_both_configured(monkeypatch):
|
||||
"""Returns MultiAuth when both Google OAuth and UNRAID_MCP_API_KEY are set."""
|
||||
from fastmcp.server.auth import MultiAuth
|
||||
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
monkeypatch.setenv("UNRAID_MCP_API_KEY", "my-secret-api-key")
|
||||
monkeypatch.setenv("UNRAID_MCP_JWT_SIGNING_KEY", "x" * 32)
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
mock_provider = MagicMock()
|
||||
with patch("unraid_mcp.server.GoogleProvider", return_value=mock_provider):
|
||||
result = srv._build_auth()
|
||||
|
||||
assert isinstance(result, MultiAuth)
|
||||
# Server is the Google provider
|
||||
assert result.server is mock_provider
|
||||
# One additional verifier — the ApiKeyVerifier
|
||||
assert len(result.verifiers) == 1
|
||||
assert isinstance(result.verifiers[0], srv.ApiKeyVerifier)
|
||||
|
||||
|
||||
def test_build_auth_multi_auth_api_key_verifier_uses_correct_key(monkeypatch):
|
||||
"""The ApiKeyVerifier inside MultiAuth is seeded with the configured key."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
monkeypatch.setenv("UNRAID_MCP_API_KEY", "super-secret-token")
|
||||
monkeypatch.setenv("UNRAID_MCP_JWT_SIGNING_KEY", "x" * 32)
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
with patch("unraid_mcp.server.GoogleProvider", return_value=MagicMock()):
|
||||
result = srv._build_auth()
|
||||
|
||||
verifier = result.verifiers[0]
|
||||
assert verifier._api_key == "super-secret-token"
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_array tool."""
|
||||
"""Tests for array subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,36 +11,36 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.array.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestArrayValidation:
|
||||
async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_invalid_subaction_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="start")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="array", subaction="start")
|
||||
|
||||
async def test_removed_actions_are_invalid(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in (
|
||||
for subaction in (
|
||||
"start",
|
||||
"stop",
|
||||
"shutdown",
|
||||
"reboot",
|
||||
"clear_stats",
|
||||
):
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action=action)
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="array", subaction=subaction)
|
||||
|
||||
async def test_parity_start_requires_correct(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="correct is required"):
|
||||
await tool_fn(action="parity_start")
|
||||
await tool_fn(action="array", subaction="parity_start")
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
|
||||
@@ -48,9 +48,9 @@ class TestArrayActions:
|
||||
async def test_parity_start(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=False)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "parity_start"
|
||||
assert result["subaction"] == "parity_start"
|
||||
_mock_graphql.assert_called_once()
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"correct": False}
|
||||
@@ -58,7 +58,7 @@ class TestArrayActions:
|
||||
async def test_parity_start_with_correct(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=True)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=True)
|
||||
assert result["success"] is True
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"correct": True}
|
||||
@@ -66,32 +66,32 @@ class TestArrayActions:
|
||||
async def test_parity_status(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"parityCheckStatus": {"progress": 50}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_status")
|
||||
result = await tool_fn(action="array", subaction="parity_status")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_pause(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"pause": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_pause")
|
||||
result = await tool_fn(action="array", subaction="parity_pause")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_resume(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"resume": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_resume")
|
||||
result = await tool_fn(action="array", subaction="parity_resume")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_cancel(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"cancel": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_cancel")
|
||||
result = await tool_fn(action="array", subaction="parity_cancel")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("disk error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to execute array/parity_status"):
|
||||
await tool_fn(action="parity_status")
|
||||
await tool_fn(action="array", subaction="parity_status")
|
||||
|
||||
|
||||
class TestArrayMutationFailures:
|
||||
@@ -100,14 +100,14 @@ class TestArrayMutationFailures:
|
||||
async def test_parity_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": False}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=False)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": False}}
|
||||
|
||||
async def test_parity_start_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": None}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=False)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": None}}
|
||||
|
||||
@@ -116,7 +116,7 @@ class TestArrayMutationFailures:
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": {}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=False)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": {}}}
|
||||
|
||||
@@ -124,7 +124,7 @@ class TestArrayMutationFailures:
|
||||
_mock_graphql.side_effect = TimeoutError("operation timed out")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="parity_cancel")
|
||||
await tool_fn(action="array", subaction="parity_cancel")
|
||||
|
||||
|
||||
class TestArrayNetworkErrors:
|
||||
@@ -134,13 +134,13 @@ class TestArrayNetworkErrors:
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="HTTP error 500"):
|
||||
await tool_fn(action="parity_start", correct=False)
|
||||
await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
|
||||
async def test_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = ToolError("Network connection error: Connection refused")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Network connection error"):
|
||||
await tool_fn(action="parity_status")
|
||||
await tool_fn(action="array", subaction="parity_status")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -156,7 +156,7 @@ async def test_parity_history_returns_history(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"parityHistory": [{"date": "2026-03-01T00:00:00Z", "status": "COMPLETED", "errors": 0}]
|
||||
}
|
||||
result = await _make_tool()(action="parity_history")
|
||||
result = await _make_tool()(action="array", subaction="parity_history")
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]["parityHistory"]) == 1
|
||||
|
||||
@@ -167,20 +167,20 @@ async def test_parity_history_returns_history(_mock_graphql):
|
||||
@pytest.mark.asyncio
|
||||
async def test_start_array(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STARTED"}}}
|
||||
result = await _make_tool()(action="start_array")
|
||||
result = await _make_tool()(action="array", subaction="start_array")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_array_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(action="stop_array", confirm=False)
|
||||
await _make_tool()(action="array", subaction="stop_array", confirm=False)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_array_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STOPPED"}}}
|
||||
result = await _make_tool()(action="stop_array", confirm=True)
|
||||
result = await _make_tool()(action="array", subaction="stop_array", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@@ -190,13 +190,13 @@ async def test_stop_array_with_confirm(_mock_graphql):
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_disk_requires_disk_id(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await _make_tool()(action="add_disk")
|
||||
await _make_tool()(action="array", subaction="add_disk")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_disk_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"addDiskToArray": {"state": "STARTED"}}}
|
||||
result = await _make_tool()(action="add_disk", disk_id="abc123:local")
|
||||
result = await _make_tool()(action="array", subaction="add_disk", disk_id="abc123:local")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@@ -206,13 +206,17 @@ async def test_add_disk_success(_mock_graphql):
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_disk_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(action="remove_disk", disk_id="abc123:local", confirm=False)
|
||||
await _make_tool()(
|
||||
action="array", subaction="remove_disk", disk_id="abc123:local", confirm=False
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_disk_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"removeDiskFromArray": {"state": "STOPPED"}}}
|
||||
result = await _make_tool()(action="remove_disk", disk_id="abc123:local", confirm=True)
|
||||
result = await _make_tool()(
|
||||
action="array", subaction="remove_disk", disk_id="abc123:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@@ -222,13 +226,13 @@ async def test_remove_disk_with_confirm(_mock_graphql):
|
||||
@pytest.mark.asyncio
|
||||
async def test_mount_disk_requires_disk_id(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await _make_tool()(action="mount_disk")
|
||||
await _make_tool()(action="array", subaction="mount_disk")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_unmount_disk_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"unmountArrayDisk": {"id": "abc123:local"}}}
|
||||
result = await _make_tool()(action="unmount_disk", disk_id="abc123:local")
|
||||
result = await _make_tool()(action="array", subaction="unmount_disk", disk_id="abc123:local")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@@ -238,11 +242,15 @@ async def test_unmount_disk_success(_mock_graphql):
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_disk_stats_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(action="clear_disk_stats", disk_id="abc123:local", confirm=False)
|
||||
await _make_tool()(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc123:local", confirm=False
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_disk_stats_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"clearArrayDiskStatistics": True}}
|
||||
result = await _make_tool()(action="clear_disk_stats", disk_id="abc123:local", confirm=True)
|
||||
result = await _make_tool()(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc123:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
115
tests/test_auth_builder.py
Normal file
115
tests/test_auth_builder.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""Tests for _build_google_auth() in server.py."""
|
||||
|
||||
import importlib
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from unraid_mcp.server import _build_google_auth
|
||||
|
||||
|
||||
def test_build_google_auth_returns_none_when_unconfigured(monkeypatch):
|
||||
"""Returns None when Google OAuth env vars are absent."""
|
||||
# Use explicit empty values so dotenv reload cannot re-inject from ~/.unraid-mcp/.env.
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "")
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
result = _build_google_auth()
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_build_google_auth_returns_provider_when_configured(monkeypatch):
|
||||
"""Returns GoogleProvider instance when all required vars are set."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
monkeypatch.setenv("UNRAID_MCP_JWT_SIGNING_KEY", "x" * 32)
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
mock_provider = MagicMock()
|
||||
mock_provider_class = MagicMock(return_value=mock_provider)
|
||||
|
||||
with patch("unraid_mcp.server.GoogleProvider", mock_provider_class):
|
||||
result = _build_google_auth()
|
||||
|
||||
assert result is mock_provider
|
||||
mock_provider_class.assert_called_once_with(
|
||||
client_id="test-id.apps.googleusercontent.com",
|
||||
client_secret="GOCSPX-test-secret",
|
||||
base_url="http://10.1.0.2:6970",
|
||||
extra_authorize_params={"access_type": "online", "prompt": "consent"},
|
||||
require_authorization_consent=False,
|
||||
jwt_signing_key="x" * 32,
|
||||
)
|
||||
|
||||
|
||||
def test_build_google_auth_omits_jwt_key_when_empty(monkeypatch):
|
||||
"""jwt_signing_key is omitted (not passed as empty string) when not set."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
# Use setenv("") not delenv so dotenv reload can't re-inject from ~/.unraid-mcp/.env
|
||||
monkeypatch.setenv("UNRAID_MCP_JWT_SIGNING_KEY", "")
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
mock_provider_class = MagicMock(return_value=MagicMock())
|
||||
|
||||
with patch("unraid_mcp.server.GoogleProvider", mock_provider_class):
|
||||
_build_google_auth()
|
||||
|
||||
call_kwargs = mock_provider_class.call_args.kwargs
|
||||
assert "jwt_signing_key" not in call_kwargs
|
||||
|
||||
|
||||
def test_build_google_auth_warns_on_stdio_transport(monkeypatch):
|
||||
"""Logs a warning when Google auth is configured but transport is stdio."""
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_ID", "test-id.apps.googleusercontent.com")
|
||||
monkeypatch.setenv("GOOGLE_CLIENT_SECRET", "GOCSPX-test-secret")
|
||||
monkeypatch.setenv("UNRAID_MCP_BASE_URL", "http://10.1.0.2:6970")
|
||||
monkeypatch.setenv("UNRAID_MCP_TRANSPORT", "stdio")
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
warning_messages: list[str] = []
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.server.GoogleProvider", MagicMock(return_value=MagicMock())),
|
||||
patch("unraid_mcp.server.logger") as mock_logger,
|
||||
):
|
||||
mock_logger.warning.side_effect = lambda msg, *a, **kw: warning_messages.append(msg)
|
||||
_build_google_auth()
|
||||
|
||||
assert any("stdio" in m.lower() for m in warning_messages)
|
||||
|
||||
|
||||
def test_mcp_instance_has_no_auth_by_default():
|
||||
"""The FastMCP mcp instance has no auth provider when Google vars are absent."""
|
||||
import os
|
||||
|
||||
for var in ("GOOGLE_CLIENT_ID", "GOOGLE_CLIENT_SECRET", "UNRAID_MCP_BASE_URL"):
|
||||
os.environ[var] = ""
|
||||
|
||||
import importlib
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
importlib.reload(s)
|
||||
|
||||
import unraid_mcp.server as srv
|
||||
|
||||
importlib.reload(srv)
|
||||
|
||||
# FastMCP stores auth on ._auth_provider or .auth
|
||||
auth = getattr(srv.mcp, "_auth_provider", None) or getattr(srv.mcp, "auth", None)
|
||||
assert auth is None
|
||||
91
tests/test_auth_settings.py
Normal file
91
tests/test_auth_settings.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Tests for Google OAuth settings loading."""
|
||||
|
||||
import importlib
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _reload_settings(monkeypatch, overrides: dict) -> Any:
|
||||
"""Reload settings module with given env vars set."""
|
||||
for k, v in overrides.items():
|
||||
monkeypatch.setenv(k, v)
|
||||
import unraid_mcp.config.settings as mod
|
||||
|
||||
importlib.reload(mod)
|
||||
return mod
|
||||
|
||||
|
||||
def test_google_auth_defaults_to_empty(monkeypatch):
|
||||
"""Google auth vars default to empty string when not set."""
|
||||
# Use setenv("", "") rather than delenv so dotenv reload can't re-inject values
|
||||
# from ~/.unraid-mcp/.env (load_dotenv won't override existing env vars).
|
||||
mod = _reload_settings(
|
||||
monkeypatch,
|
||||
{
|
||||
"GOOGLE_CLIENT_ID": "",
|
||||
"GOOGLE_CLIENT_SECRET": "",
|
||||
"UNRAID_MCP_BASE_URL": "",
|
||||
"UNRAID_MCP_JWT_SIGNING_KEY": "",
|
||||
},
|
||||
)
|
||||
assert mod.GOOGLE_CLIENT_ID == ""
|
||||
assert mod.GOOGLE_CLIENT_SECRET == ""
|
||||
assert mod.UNRAID_MCP_BASE_URL == ""
|
||||
assert mod.UNRAID_MCP_JWT_SIGNING_KEY == ""
|
||||
|
||||
|
||||
def test_google_auth_reads_env_vars(monkeypatch):
|
||||
"""Google auth vars are read from environment."""
|
||||
mod = _reload_settings(
|
||||
monkeypatch,
|
||||
{
|
||||
"GOOGLE_CLIENT_ID": "test-client-id.apps.googleusercontent.com",
|
||||
"GOOGLE_CLIENT_SECRET": "GOCSPX-test-secret",
|
||||
"UNRAID_MCP_BASE_URL": "http://10.1.0.2:6970",
|
||||
"UNRAID_MCP_JWT_SIGNING_KEY": "a" * 32,
|
||||
},
|
||||
)
|
||||
assert mod.GOOGLE_CLIENT_ID == "test-client-id.apps.googleusercontent.com"
|
||||
assert mod.GOOGLE_CLIENT_SECRET == "GOCSPX-test-secret"
|
||||
assert mod.UNRAID_MCP_BASE_URL == "http://10.1.0.2:6970"
|
||||
assert mod.UNRAID_MCP_JWT_SIGNING_KEY == "a" * 32
|
||||
|
||||
|
||||
def test_google_auth_enabled_requires_both_vars(monkeypatch):
|
||||
"""is_google_auth_configured() requires both client_id and client_secret."""
|
||||
# Only client_id — not configured
|
||||
mod = _reload_settings(
|
||||
monkeypatch,
|
||||
{
|
||||
"GOOGLE_CLIENT_ID": "test-id",
|
||||
"GOOGLE_CLIENT_SECRET": "",
|
||||
"UNRAID_MCP_BASE_URL": "http://10.1.0.2:6970",
|
||||
},
|
||||
)
|
||||
monkeypatch.delenv("GOOGLE_CLIENT_SECRET", raising=False)
|
||||
importlib.reload(mod)
|
||||
assert not mod.is_google_auth_configured()
|
||||
|
||||
# Both set — configured
|
||||
mod2 = _reload_settings(
|
||||
monkeypatch,
|
||||
{
|
||||
"GOOGLE_CLIENT_ID": "test-id",
|
||||
"GOOGLE_CLIENT_SECRET": "test-secret",
|
||||
"UNRAID_MCP_BASE_URL": "http://10.1.0.2:6970",
|
||||
},
|
||||
)
|
||||
assert mod2.is_google_auth_configured()
|
||||
|
||||
|
||||
def test_google_auth_requires_base_url(monkeypatch):
|
||||
"""is_google_auth_configured() is False when base_url is missing."""
|
||||
mod = _reload_settings(
|
||||
monkeypatch,
|
||||
{
|
||||
"GOOGLE_CLIENT_ID": "test-id",
|
||||
"GOOGLE_CLIENT_SECRET": "test-secret",
|
||||
},
|
||||
)
|
||||
monkeypatch.delenv("UNRAID_MCP_BASE_URL", raising=False)
|
||||
importlib.reload(mod)
|
||||
assert not mod.is_google_auth_configured()
|
||||
@@ -1,8 +1,9 @@
|
||||
# tests/test_customization.py
|
||||
"""Tests for unraid_customization tool."""
|
||||
"""Tests for customization subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
@@ -11,16 +12,12 @@ from conftest import make_tool_fn
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.customization.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.customization",
|
||||
"register_customization_tool",
|
||||
"unraid_customization",
|
||||
)
|
||||
def _make_tool() -> Any:
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -28,23 +25,22 @@ async def test_theme_returns_customization(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"customization": {"theme": {"name": "azure"}, "partnerInfo": None, "activationCode": None}
|
||||
}
|
||||
result = await _make_tool()(action="theme")
|
||||
assert result["success"] is True
|
||||
result = await _make_tool()(action="customization", subaction="theme")
|
||||
assert result["customization"]["theme"]["name"] == "azure"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_public_theme(_mock_graphql):
|
||||
_mock_graphql.return_value = {"publicTheme": {"name": "black"}}
|
||||
result = await _make_tool()(action="public_theme")
|
||||
assert result["success"] is True
|
||||
result = await _make_tool()(action="customization", subaction="public_theme")
|
||||
assert result["publicTheme"]["name"] == "black"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_is_initial_setup(_mock_graphql):
|
||||
_mock_graphql.return_value = {"isInitialSetup": False}
|
||||
result = await _make_tool()(action="is_initial_setup")
|
||||
assert result["success"] is True
|
||||
assert result["data"]["isInitialSetup"] is False
|
||||
result = await _make_tool()(action="customization", subaction="is_initial_setup")
|
||||
assert result["isInitialSetup"] is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -52,7 +48,7 @@ async def test_set_theme_requires_theme(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="theme_name"):
|
||||
await _make_tool()(action="set_theme")
|
||||
await _make_tool()(action="customization", subaction="set_theme")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -60,5 +56,5 @@ async def test_set_theme_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"customization": {"setTheme": {"name": "azure", "showBannerImage": True}}
|
||||
}
|
||||
result = await _make_tool()(action="set_theme", theme_name="azure")
|
||||
result = await _make_tool()(action="customization", subaction="set_theme", theme_name="azure")
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -1,58 +1,12 @@
|
||||
"""Tests for unraid_docker tool."""
|
||||
"""Tests for docker subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import get_args
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.docker import (
|
||||
DOCKER_ACTIONS,
|
||||
find_container_by_identifier,
|
||||
get_available_container_names,
|
||||
)
|
||||
|
||||
|
||||
# --- Unit tests for helpers ---
|
||||
|
||||
|
||||
class TestFindContainerByIdentifier:
|
||||
def test_by_exact_id(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("abc123", containers) == containers[0]
|
||||
|
||||
def test_by_exact_name(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("plex", containers) == containers[0]
|
||||
|
||||
def test_fuzzy_match(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex-media-server"]}]
|
||||
result = find_container_by_identifier("plex", containers)
|
||||
assert result == containers[0]
|
||||
|
||||
def test_not_found(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("sonarr", containers) is None
|
||||
|
||||
def test_empty_list(self) -> None:
|
||||
assert find_container_by_identifier("plex", []) is None
|
||||
|
||||
|
||||
class TestGetAvailableContainerNames:
|
||||
def test_extracts_names(self) -> None:
|
||||
containers = [
|
||||
{"names": ["plex"]},
|
||||
{"names": ["sonarr", "sonarr-v3"]},
|
||||
]
|
||||
names = get_available_container_names(containers)
|
||||
assert "plex" in names
|
||||
assert "sonarr" in names
|
||||
assert "sonarr-v3" in names
|
||||
|
||||
def test_empty(self) -> None:
|
||||
assert get_available_container_names([]) == []
|
||||
|
||||
|
||||
# --- Integration tests ---
|
||||
@@ -60,55 +14,34 @@ class TestGetAvailableContainerNames:
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestDockerValidation:
|
||||
@pytest.mark.parametrize(
|
||||
"action",
|
||||
[
|
||||
"logs",
|
||||
"port_conflicts",
|
||||
"check_updates",
|
||||
"pause",
|
||||
"unpause",
|
||||
"remove",
|
||||
"update",
|
||||
"update_all",
|
||||
"create_folder",
|
||||
"delete_entries",
|
||||
"reset_template_mappings",
|
||||
],
|
||||
)
|
||||
def test_removed_actions_are_gone(self, action: str) -> None:
|
||||
assert action not in get_args(DOCKER_ACTIONS), (
|
||||
f"Action '{action}' should have been removed from DOCKER_ACTIONS"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("action", ["start", "stop", "details"])
|
||||
@pytest.mark.parametrize("subaction", ["start", "stop", "details"])
|
||||
async def test_container_actions_require_id(
|
||||
self, _mock_graphql: AsyncMock, action: str
|
||||
self, _mock_graphql: AsyncMock, subaction: str
|
||||
) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="container_id"):
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="docker", subaction=subaction)
|
||||
|
||||
async def test_network_details_requires_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="network_id"):
|
||||
await tool_fn(action="network_details")
|
||||
await tool_fn(action="docker", subaction="network_details")
|
||||
|
||||
async def test_non_logs_action_ignores_tail_lines_validation(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"docker": {"containers": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="docker", subaction="list")
|
||||
assert result["containers"] == []
|
||||
|
||||
|
||||
@@ -118,7 +51,7 @@ class TestDockerActions:
|
||||
"docker": {"containers": [{"id": "c1", "names": ["plex"], "state": "running"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="docker", subaction="list")
|
||||
assert len(result["containers"]) == 1
|
||||
|
||||
async def test_start_container(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -136,13 +69,13 @@ class TestDockerActions:
|
||||
},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_networks(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"docker": {"networks": [{"id": "net:1", "name": "bridge"}]}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="networks")
|
||||
result = await tool_fn(action="docker", subaction="networks")
|
||||
assert len(result["networks"]) == 1
|
||||
|
||||
async def test_idempotent_start(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -152,7 +85,7 @@ class TestDockerActions:
|
||||
{"idempotent_success": True, "docker": {}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["idempotent"] is True
|
||||
|
||||
async def test_restart(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -163,9 +96,9 @@ class TestDockerActions:
|
||||
{"docker": {"start": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="restart", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="restart", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "restart"
|
||||
assert result["subaction"] == "restart"
|
||||
|
||||
async def test_restart_idempotent_stop(self, _mock_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
@@ -175,7 +108,7 @@ class TestDockerActions:
|
||||
{"docker": {"start": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="restart", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="restart", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert "note" in result
|
||||
|
||||
@@ -188,14 +121,14 @@ class TestDockerActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="details", container_id="plex")
|
||||
assert result["names"] == ["plex"]
|
||||
|
||||
async def test_generic_exception_wraps_in_tool_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("unexpected failure")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to execute docker/list"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_short_id_prefix_ambiguous_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
@@ -214,7 +147,7 @@ class TestDockerActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="ambiguous"):
|
||||
await tool_fn(action="details", container_id="abcdef123456")
|
||||
await tool_fn(action="docker", subaction="details", container_id="abcdef123456")
|
||||
|
||||
|
||||
class TestDockerMutationFailures:
|
||||
@@ -228,7 +161,7 @@ class TestDockerMutationFailures:
|
||||
{"docker": {}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["container"] is None
|
||||
|
||||
@@ -240,7 +173,7 @@ class TestDockerMutationFailures:
|
||||
{"docker": {"stop": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="stop", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="stop", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["container"]["state"] == "running"
|
||||
|
||||
@@ -254,7 +187,7 @@ class TestDockerMutationFailures:
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="start", container_id="plex")
|
||||
await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
|
||||
|
||||
class TestDockerNetworkErrors:
|
||||
@@ -267,14 +200,14 @@ class TestDockerNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_list_http_401_unauthorized(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""HTTP 401 should propagate as ToolError."""
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 401: Unauthorized")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="401"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_json_decode_error_on_list(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Invalid JSON response should be wrapped in ToolError."""
|
||||
@@ -283,4 +216,4 @@ class TestDockerNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
90
tests/test_guards.py
Normal file
90
tests/test_guards.py
Normal file
@@ -0,0 +1,90 @@
|
||||
"""Unit tests for unraid_mcp.core.guards."""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp.exceptions import ToolError
|
||||
|
||||
from unraid_mcp.core.guards import gate_destructive_action
|
||||
|
||||
|
||||
DESTRUCTIVE = {"delete", "wipe"}
|
||||
|
||||
|
||||
class TestGateDestructiveAction:
|
||||
"""gate_destructive_action raises ToolError or elicits based on state."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_non_destructive_action_passes_through(self) -> None:
|
||||
"""Non-destructive actions are never blocked."""
|
||||
await gate_destructive_action(None, "list", DESTRUCTIVE, False, "irrelevant")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_confirm_true_bypasses_elicitation(self) -> None:
|
||||
"""confirm=True skips elicitation entirely."""
|
||||
with patch("unraid_mcp.core.guards.elicit_destructive_confirmation") as mock_elicit:
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, True, "desc")
|
||||
mock_elicit.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_no_ctx_raises_tool_error(self) -> None:
|
||||
"""ctx=None means elicitation returns False → ToolError."""
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, False, "desc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicitation_accepted_does_not_raise(self) -> None:
|
||||
"""When elicitation returns True, no ToolError is raised."""
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
):
|
||||
await gate_destructive_action(object(), "delete", DESTRUCTIVE, False, "desc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicitation_declined_raises_tool_error(self) -> None:
|
||||
"""When elicitation returns False, ToolError is raised."""
|
||||
with (
|
||||
patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=False,
|
||||
) as mock_elicit,
|
||||
pytest.raises(ToolError, match="confirm=True"),
|
||||
):
|
||||
await gate_destructive_action(object(), "delete", DESTRUCTIVE, False, "desc")
|
||||
mock_elicit.assert_called_once()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_string_description_passed_to_elicitation(self) -> None:
|
||||
"""A plain string description is forwarded as-is."""
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
) as mock_elicit:
|
||||
await gate_destructive_action(
|
||||
object(), "delete", DESTRUCTIVE, False, "Delete everything."
|
||||
)
|
||||
_, _, desc = mock_elicit.call_args.args
|
||||
assert desc == "Delete everything."
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dict_description_resolves_by_action(self) -> None:
|
||||
"""A dict description is resolved by action key."""
|
||||
descs = {"delete": "Delete desc.", "wipe": "Wipe desc."}
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
) as mock_elicit:
|
||||
await gate_destructive_action(object(), "wipe", DESTRUCTIVE, False, descs)
|
||||
_, _, desc = mock_elicit.call_args.args
|
||||
assert desc == "Wipe desc."
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_error_message_contains_action_name(self) -> None:
|
||||
"""ToolError message includes the action name."""
|
||||
with pytest.raises(ToolError, match="'delete'"):
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, False, "desc")
|
||||
@@ -1,7 +1,7 @@
|
||||
"""Tests for unraid_health tool."""
|
||||
"""Tests for health subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
@@ -12,26 +12,26 @@ from unraid_mcp.core.utils import safe_display_url
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.health.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestHealthValidation:
|
||||
async def test_invalid_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_invalid_subaction(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="invalid")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="health", subaction="invalid")
|
||||
|
||||
|
||||
class TestHealthActions:
|
||||
async def test_test_connection(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"online": True}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="test_connection")
|
||||
result = await tool_fn(action="health", subaction="test_connection")
|
||||
assert result["status"] == "connected"
|
||||
assert result["online"] is True
|
||||
assert "latency_ms" in result
|
||||
@@ -46,13 +46,38 @@ class TestHealthActions:
|
||||
},
|
||||
"array": {"state": "STARTED"},
|
||||
"notifications": {"overview": {"unread": {"alert": 0, "warning": 0, "total": 3}}},
|
||||
"docker": {"containers": [{"id": "c1", "state": "running", "status": "Up 2 days"}]},
|
||||
"docker": {"containers": [{"id": "c1", "state": "RUNNING", "status": "Up 2 days"}]},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "healthy"
|
||||
assert "api_latency_ms" in result
|
||||
|
||||
async def test_check_docker_counts_uppercase_states(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""ContainerState enum is UPPERCASE — running/stopped counts must use case-insensitive match."""
|
||||
_mock_graphql.return_value = {
|
||||
"info": {
|
||||
"machineId": "x",
|
||||
"versions": {"core": {"unraid": "7.0"}},
|
||||
"os": {"uptime": 1},
|
||||
},
|
||||
"array": {"state": "STARTED"},
|
||||
"notifications": {"overview": {"unread": {"alert": 0, "warning": 0, "total": 0}}},
|
||||
"docker": {
|
||||
"containers": [
|
||||
{"id": "c1", "state": "RUNNING"},
|
||||
{"id": "c2", "state": "RUNNING"},
|
||||
{"id": "c3", "state": "EXITED"},
|
||||
]
|
||||
},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
svc = result["docker_services"]
|
||||
assert svc["total"] == 3
|
||||
assert svc["running"] == 2
|
||||
assert svc["stopped"] == 1
|
||||
|
||||
async def test_check_warning_on_alerts(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"info": {"machineId": "abc", "versions": {"unraid": "7.2"}, "os": {"uptime": 100}},
|
||||
@@ -61,20 +86,20 @@ class TestHealthActions:
|
||||
"docker": {"containers": []},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "warning"
|
||||
assert any("alert" in i for i in result.get("issues", []))
|
||||
|
||||
async def test_check_no_data(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "unhealthy"
|
||||
|
||||
async def test_check_api_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = Exception("Connection refused")
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "unhealthy"
|
||||
assert "Connection refused" in result["error"]
|
||||
|
||||
@@ -87,61 +112,60 @@ class TestHealthActions:
|
||||
"docker": {"containers": []},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
# Missing info escalates to "degraded"; alerts only escalate to "warning"
|
||||
# Severity should stay at "degraded" (not downgrade to "warning")
|
||||
assert result["status"] == "degraded"
|
||||
|
||||
async def test_diagnose_wraps_exception(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""When _diagnose_subscriptions raises, tool wraps in ToolError."""
|
||||
async def test_diagnose_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Diagnose returns subscription status."""
|
||||
tool_fn = _make_tool()
|
||||
mock_status = {"cpu": {"connection_state": "connected"}}
|
||||
mock_manager = MagicMock()
|
||||
mock_manager.get_subscription_status = AsyncMock(return_value=mock_status)
|
||||
mock_manager.auto_start_enabled = True
|
||||
mock_manager.max_reconnect_attempts = 3
|
||||
mock_manager.subscription_configs = {}
|
||||
mock_manager.active_subscriptions = {}
|
||||
mock_manager.resource_data = {}
|
||||
|
||||
mock_cache = MagicMock()
|
||||
mock_cache.statistics.return_value = MagicMock(call_tool=None)
|
||||
mock_error = MagicMock()
|
||||
mock_error.get_error_stats.return_value = {}
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager.subscription_manager", mock_manager),
|
||||
patch("unraid_mcp.subscriptions.resources.ensure_subscriptions_started", AsyncMock()),
|
||||
patch(
|
||||
"unraid_mcp.tools.health._diagnose_subscriptions",
|
||||
side_effect=RuntimeError("broken"),
|
||||
"unraid_mcp.subscriptions.utils._analyze_subscription_status",
|
||||
return_value=(0, []),
|
||||
),
|
||||
patch("unraid_mcp.server._cache_middleware", mock_cache),
|
||||
patch("unraid_mcp.server._error_middleware", mock_error),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="diagnose")
|
||||
assert "subscriptions" in result
|
||||
assert "summary" in result
|
||||
assert "cache" in result
|
||||
assert "errors" in result
|
||||
|
||||
async def test_diagnose_wraps_exception(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""When subscription manager raises, tool wraps in ToolError."""
|
||||
tool_fn = _make_tool()
|
||||
mock_manager = MagicMock()
|
||||
mock_manager.get_subscription_status = AsyncMock(side_effect=RuntimeError("broken"))
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager.subscription_manager", mock_manager),
|
||||
patch("unraid_mcp.subscriptions.resources.ensure_subscriptions_started", AsyncMock()),
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.utils._analyze_subscription_status",
|
||||
return_value=(0, []),
|
||||
),
|
||||
pytest.raises(ToolError, match="Failed to execute health/diagnose"),
|
||||
):
|
||||
await tool_fn(action="diagnose")
|
||||
|
||||
async def test_diagnose_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Diagnose returns subscription status when modules are available."""
|
||||
tool_fn = _make_tool()
|
||||
mock_status = {
|
||||
"cpu_sub": {"runtime": {"connection_state": "connected", "last_error": None}},
|
||||
}
|
||||
with patch("unraid_mcp.tools.health._diagnose_subscriptions", return_value=mock_status):
|
||||
result = await tool_fn(action="diagnose")
|
||||
assert "cpu_sub" in result
|
||||
|
||||
async def test_diagnose_import_error_internal(self) -> None:
|
||||
"""_diagnose_subscriptions raises ToolError when subscription modules are unavailable."""
|
||||
import sys
|
||||
|
||||
from unraid_mcp.tools.health import _diagnose_subscriptions
|
||||
|
||||
# Remove cached subscription modules so the import is re-triggered
|
||||
cached = {k: v for k, v in sys.modules.items() if "unraid_mcp.subscriptions" in k}
|
||||
for k in cached:
|
||||
del sys.modules[k]
|
||||
|
||||
try:
|
||||
# Replace the modules with objects that raise ImportError on access
|
||||
with (
|
||||
patch.dict(
|
||||
sys.modules,
|
||||
{
|
||||
"unraid_mcp.subscriptions": None,
|
||||
"unraid_mcp.subscriptions.manager": None,
|
||||
"unraid_mcp.subscriptions.resources": None,
|
||||
},
|
||||
),
|
||||
pytest.raises(ToolError, match="Subscription modules not available"),
|
||||
):
|
||||
await _diagnose_subscriptions()
|
||||
finally:
|
||||
# Restore cached modules
|
||||
sys.modules.update(cached)
|
||||
await tool_fn(action="health", subaction="diagnose")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -166,17 +190,20 @@ class TestSafeDisplayUrl:
|
||||
|
||||
def test_strips_path(self) -> None:
|
||||
result = safe_display_url("http://unraid.local/some/deep/path?query=1")
|
||||
assert result is not None
|
||||
assert "path" not in result
|
||||
assert "query" not in result
|
||||
|
||||
def test_strips_credentials(self) -> None:
|
||||
result = safe_display_url("https://user:password@unraid.local/graphql")
|
||||
assert result is not None
|
||||
assert "user" not in result
|
||||
assert "password" not in result
|
||||
assert result == "https://unraid.local"
|
||||
|
||||
def test_strips_query_params(self) -> None:
|
||||
result = safe_display_url("http://host.local?token=abc&key=xyz")
|
||||
assert result is not None
|
||||
assert "token" not in result
|
||||
assert "abc" not in result
|
||||
|
||||
@@ -190,23 +217,25 @@ class TestSafeDisplayUrl:
|
||||
|
||||
def test_malformed_ipv6_url_returns_unparseable(self) -> None:
|
||||
"""Malformed IPv6 brackets in netloc cause urlparse.hostname to raise ValueError."""
|
||||
# urlparse("https://[invalid") parses without error, but accessing .hostname
|
||||
# raises ValueError: Invalid IPv6 URL — this triggers the except branch.
|
||||
result = safe_display_url("https://[invalid")
|
||||
assert result == "<unparseable>"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_action_calls_elicitation() -> None:
|
||||
"""setup action triggers elicit_and_configure and returns success message."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
"""setup subaction triggers elicit_and_configure when no credentials exist."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
with patch(
|
||||
"unraid_mcp.tools.health.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_elicit:
|
||||
result = await tool_fn(action="setup", ctx=MagicMock())
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_elicit,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert mock_elicit.called
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
@@ -214,13 +243,17 @@ async def test_health_setup_action_calls_elicitation() -> None:
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_action_returns_declined_message() -> None:
|
||||
"""setup action with declined elicitation returns appropriate message."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
"""setup subaction with declined elicitation returns appropriate message."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
with patch("unraid_mcp.tools.health.elicit_and_configure", new=AsyncMock(return_value=False)):
|
||||
result = await tool_fn(action="setup", ctx=MagicMock())
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=False)),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert (
|
||||
"not configured" in result.lower()
|
||||
@@ -229,18 +262,156 @@ async def test_health_setup_action_returns_declined_message() -> None:
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_already_configured_and_working_no_reset() -> None:
|
||||
"""setup returns early when credentials exist, connection works, and user declines reset."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=False),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "already configured" in result.lower()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_already_configured_user_confirms_reset() -> None:
|
||||
"""setup proceeds with elicitation when credentials exist but user confirms reset."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=True),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_called_once()
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_credentials_exist_but_connection_fails_user_confirms() -> None:
|
||||
"""setup prompts for confirmation even on failed probe, then reconfigures if confirmed."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(side_effect=Exception("connection refused")),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=True),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_called_once()
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_credentials_exist_connection_fails_user_declines() -> None:
|
||||
"""setup returns 'no changes' when credentials exist (even with failed probe) and user declines."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(side_effect=Exception("connection refused")),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=False),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_ctx_none_already_configured_returns_no_changes() -> None:
|
||||
"""When ctx=None and credentials are working, setup returns 'already configured' gracefully."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=None)
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "already configured" in result.lower()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_declined_message_includes_manual_path() -> None:
|
||||
"""Declined setup message includes the exact credentials file path and variable names."""
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
from unraid_mcp.config.settings import CREDENTIALS_ENV_PATH
|
||||
|
||||
tool_fn = _make_tool()
|
||||
|
||||
with patch("unraid_mcp.tools.health.elicit_and_configure", new=AsyncMock(return_value=False)):
|
||||
result = await tool_fn(action="setup", ctx=MagicMock())
|
||||
real_path_str = str(CREDENTIALS_ENV_PATH)
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
type(mock_path).__str__ = lambda self: real_path_str # type: ignore[method-assign]
|
||||
|
||||
assert str(CREDENTIALS_ENV_PATH) in result
|
||||
assert "UNRAID_API_URL=" in result # inline variable shown
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=False)),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert real_path_str in result
|
||||
assert "UNRAID_API_URL=" in result
|
||||
assert "UNRAID_API_KEY=" in result
|
||||
|
||||
@@ -1,65 +1,18 @@
|
||||
"""Tests for unraid_info tool."""
|
||||
"""Tests for system subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import get_args
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.info import (
|
||||
INFO_ACTIONS,
|
||||
_analyze_disk_health,
|
||||
_process_array_status,
|
||||
_process_system_info,
|
||||
)
|
||||
from unraid_mcp.tools.unraid import _analyze_disk_health
|
||||
|
||||
|
||||
# --- Unit tests for helper functions ---
|
||||
|
||||
|
||||
class TestProcessSystemInfo:
|
||||
def test_processes_os_info(self) -> None:
|
||||
raw = {
|
||||
"os": {
|
||||
"distro": "Unraid",
|
||||
"release": "7.2",
|
||||
"platform": "linux",
|
||||
"arch": "x86_64",
|
||||
"hostname": "tower",
|
||||
"uptime": 3600,
|
||||
},
|
||||
"cpu": {"manufacturer": "AMD", "brand": "Ryzen", "cores": 8, "threads": 16},
|
||||
}
|
||||
result = _process_system_info(raw)
|
||||
assert "summary" in result
|
||||
assert "details" in result
|
||||
assert result["summary"]["hostname"] == "tower"
|
||||
assert "AMD" in result["summary"]["cpu"]
|
||||
|
||||
def test_handles_missing_fields(self) -> None:
|
||||
result = _process_system_info({})
|
||||
assert result["summary"] == {"memory_summary": "Memory information not available."}
|
||||
|
||||
def test_processes_memory_layout(self) -> None:
|
||||
raw = {
|
||||
"memory": {
|
||||
"layout": [
|
||||
{
|
||||
"bank": "0",
|
||||
"type": "DDR4",
|
||||
"clockSpeed": 3200,
|
||||
"manufacturer": "G.Skill",
|
||||
"partNum": "XYZ",
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
result = _process_system_info(raw)
|
||||
assert len(result["summary"]["memory_layout_details"]) == 1
|
||||
|
||||
|
||||
class TestAnalyzeDiskHealth:
|
||||
def test_counts_healthy_disks(self) -> None:
|
||||
disks = [{"status": "DISK_OK"}, {"status": "DISK_OK"}]
|
||||
@@ -100,51 +53,17 @@ class TestAnalyzeDiskHealth:
|
||||
assert result["healthy"] == 0
|
||||
|
||||
|
||||
class TestProcessArrayStatus:
|
||||
def test_basic_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"capacity": {"kilobytes": {"free": "1048576", "used": "524288", "total": "1572864"}},
|
||||
"parities": [{"status": "DISK_OK"}],
|
||||
"disks": [{"status": "DISK_OK"}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["state"] == "STARTED"
|
||||
assert result["summary"]["overall_health"] == "HEALTHY"
|
||||
|
||||
def test_critical_disk_threshold_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"parities": [],
|
||||
"disks": [{"status": "DISK_OK", "critical": 55}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["overall_health"] == "CRITICAL"
|
||||
|
||||
def test_degraded_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"parities": [],
|
||||
"disks": [{"status": "DISK_NP"}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["overall_health"] == "DEGRADED"
|
||||
|
||||
|
||||
# --- Integration tests for the tool function ---
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.info.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestUnraidInfoTool:
|
||||
@@ -162,14 +81,14 @@ class TestUnraidInfoTool:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="overview")
|
||||
result = await tool_fn(action="system", subaction="overview")
|
||||
assert "summary" in result
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
async def test_ups_device_requires_device_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="device_id is required"):
|
||||
await tool_fn(action="ups_device")
|
||||
await tool_fn(action="system", subaction="ups_device")
|
||||
|
||||
async def test_network_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
@@ -193,7 +112,7 @@ class TestUnraidInfoTool:
|
||||
},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="network")
|
||||
result = await tool_fn(action="system", subaction="network")
|
||||
assert "accessUrls" in result
|
||||
assert result["httpPort"] == 6969
|
||||
assert result["httpsPort"] == 31337
|
||||
@@ -201,27 +120,27 @@ class TestUnraidInfoTool:
|
||||
|
||||
async def test_connect_action_raises_tool_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="connect.*not available"):
|
||||
await tool_fn(action="connect")
|
||||
with pytest.raises(ToolError, match="Invalid subaction 'connect'"):
|
||||
await tool_fn(action="system", subaction="connect")
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("unexpected")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to execute info/online"):
|
||||
await tool_fn(action="online")
|
||||
with pytest.raises(ToolError, match="Failed to execute system/online"):
|
||||
await tool_fn(action="system", subaction="online")
|
||||
|
||||
async def test_metrics(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"metrics": {"cpu": {"used": 25.5}, "memory": {"used": 8192, "total": 32768}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="metrics")
|
||||
result = await tool_fn(action="system", subaction="metrics")
|
||||
assert result["cpu"]["used"] == 25.5
|
||||
|
||||
async def test_services(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"services": [{"name": "docker", "state": "running"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="services")
|
||||
result = await tool_fn(action="system", subaction="services")
|
||||
assert "services" in result
|
||||
assert len(result["services"]) == 1
|
||||
assert result["services"][0]["name"] == "docker"
|
||||
@@ -231,14 +150,14 @@ class TestUnraidInfoTool:
|
||||
"settings": {"unified": {"values": {"timezone": "US/Eastern"}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="settings")
|
||||
result = await tool_fn(action="system", subaction="settings")
|
||||
assert result["timezone"] == "US/Eastern"
|
||||
|
||||
async def test_settings_non_dict_values(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Settings values that are not a dict should be wrapped in {'raw': ...}."""
|
||||
_mock_graphql.return_value = {"settings": {"unified": {"values": "raw_string"}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="settings")
|
||||
result = await tool_fn(action="system", subaction="settings")
|
||||
assert result == {"raw": "raw_string"}
|
||||
|
||||
async def test_servers(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -246,7 +165,7 @@ class TestUnraidInfoTool:
|
||||
"servers": [{"id": "s:1", "name": "tower", "status": "online"}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="servers")
|
||||
result = await tool_fn(action="system", subaction="servers")
|
||||
assert "servers" in result
|
||||
assert len(result["servers"]) == 1
|
||||
assert result["servers"][0]["name"] == "tower"
|
||||
@@ -262,7 +181,7 @@ class TestUnraidInfoTool:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="flash")
|
||||
result = await tool_fn(action="system", subaction="flash")
|
||||
assert result["product"] == "SanDisk"
|
||||
|
||||
async def test_ups_devices(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -270,7 +189,7 @@ class TestUnraidInfoTool:
|
||||
"upsDevices": [{"id": "ups:1", "model": "APC", "status": "online", "charge": 100}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="ups_devices")
|
||||
result = await tool_fn(action="system", subaction="ups_devices")
|
||||
assert "ups_devices" in result
|
||||
assert len(result["ups_devices"]) == 1
|
||||
assert result["ups_devices"][0]["model"] == "APC"
|
||||
@@ -284,7 +203,7 @@ class TestInfoNetworkErrors:
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 401: Unauthorized")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="401"):
|
||||
await tool_fn(action="overview")
|
||||
await tool_fn(action="system", subaction="overview")
|
||||
|
||||
async def test_overview_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Connection refused should propagate as ToolError."""
|
||||
@@ -293,7 +212,7 @@ class TestInfoNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="overview")
|
||||
await tool_fn(action="system", subaction="overview")
|
||||
|
||||
async def test_network_json_decode_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Invalid JSON from API should propagate as ToolError."""
|
||||
@@ -302,16 +221,17 @@ class TestInfoNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="network")
|
||||
await tool_fn(action="system", subaction="network")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regression: removed actions must not appear in INFO_ACTIONS
|
||||
# Regression: removed actions must not be valid subactions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.parametrize("action", ["update_server", "update_ssh"])
|
||||
def test_removed_info_actions_are_gone(action: str) -> None:
|
||||
assert action not in get_args(INFO_ACTIONS), (
|
||||
f"{action} references a non-existent mutation and must not be in INFO_ACTIONS"
|
||||
)
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize("subaction", ["update_server", "update_ssh"])
|
||||
async def test_removed_info_subactions_are_invalid(subaction: str) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="system", subaction=subaction)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_keys tool."""
|
||||
"""Tests for key subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,39 +11,39 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestKeysValidation:
|
||||
async def test_delete_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action="delete", key_id="k:1")
|
||||
await tool_fn(action="key", subaction="delete", key_id="k:1")
|
||||
|
||||
async def test_get_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="get")
|
||||
await tool_fn(action="key", subaction="get")
|
||||
|
||||
async def test_create_requires_name(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="name"):
|
||||
await tool_fn(action="create")
|
||||
await tool_fn(action="key", subaction="create")
|
||||
|
||||
async def test_update_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="update")
|
||||
await tool_fn(action="key", subaction="update")
|
||||
|
||||
async def test_delete_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="delete", confirm=True)
|
||||
await tool_fn(action="key", subaction="delete", confirm=True)
|
||||
|
||||
|
||||
class TestKeysActions:
|
||||
@@ -52,7 +52,7 @@ class TestKeysActions:
|
||||
"apiKeys": [{"id": "k:1", "name": "mcp-key", "roles": ["admin"]}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="key", subaction="list")
|
||||
assert len(result["keys"]) == 1
|
||||
|
||||
async def test_get(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -60,7 +60,7 @@ class TestKeysActions:
|
||||
"apiKey": {"id": "k:1", "name": "mcp-key", "roles": ["admin"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="get", key_id="k:1")
|
||||
result = await tool_fn(action="key", subaction="get", key_id="k:1")
|
||||
assert result["name"] == "mcp-key"
|
||||
|
||||
async def test_create(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -70,7 +70,7 @@ class TestKeysActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="create", name="new-key")
|
||||
result = await tool_fn(action="key", subaction="create", name="new-key")
|
||||
assert result["success"] is True
|
||||
assert result["key"]["name"] == "new-key"
|
||||
|
||||
@@ -86,7 +86,7 @@ class TestKeysActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="create", name="admin-key", roles=["admin"])
|
||||
result = await tool_fn(action="key", subaction="create", name="admin-key", roles=["admin"])
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_update(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -94,39 +94,43 @@ class TestKeysActions:
|
||||
"apiKey": {"update": {"id": "k:1", "name": "renamed", "roles": []}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="update", key_id="k:1", name="renamed")
|
||||
result = await tool_fn(action="key", subaction="update", key_id="k:1", name="renamed")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"delete": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete", key_id="k:1", confirm=True)
|
||||
result = await tool_fn(action="key", subaction="delete", key_id="k:1", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("connection lost")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to execute keys/list"):
|
||||
await tool_fn(action="list")
|
||||
with pytest.raises(ToolError, match="Failed to execute key/list"):
|
||||
await tool_fn(action="key", subaction="list")
|
||||
|
||||
async def test_add_role_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="add_role", roles=["VIEWER"])
|
||||
await tool_fn(action="key", subaction="add_role", roles=["VIEWER"])
|
||||
|
||||
async def test_add_role_requires_role(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="role"):
|
||||
await tool_fn(action="add_role", key_id="abc:local")
|
||||
await tool_fn(action="key", subaction="add_role", key_id="abc:local")
|
||||
|
||||
async def test_add_role_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"addRole": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="add_role", key_id="abc:local", roles=["VIEWER"])
|
||||
result = await tool_fn(
|
||||
action="key", subaction="add_role", key_id="abc:local", roles=["VIEWER"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_remove_role_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"removeRole": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="remove_role", key_id="abc:local", roles=["VIEWER"])
|
||||
result = await tool_fn(
|
||||
action="key", subaction="remove_role", key_id="abc:local", roles=["VIEWER"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -1,125 +1,131 @@
|
||||
# tests/test_live.py
|
||||
"""Tests for unraid_live subscription snapshot tool."""
|
||||
"""Tests for live subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mcp():
|
||||
return FastMCP("test")
|
||||
|
||||
|
||||
def _make_live_tool(mcp):
|
||||
from unraid_mcp.tools.live import register_live_tool
|
||||
|
||||
register_live_tool(mcp)
|
||||
local_provider = mcp.providers[0]
|
||||
tool = local_provider._components["tool:unraid_live@"]
|
||||
return tool.fn
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_subscribe_once():
|
||||
with patch("unraid_mcp.tools.live.subscribe_once") as m:
|
||||
with patch("unraid_mcp.subscriptions.snapshot.subscribe_once") as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_subscribe_collect():
|
||||
with patch("unraid_mcp.tools.live.subscribe_collect") as m:
|
||||
with patch("unraid_mcp.subscriptions.snapshot.subscribe_collect") as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cpu_returns_snapshot(mcp, _mock_subscribe_once):
|
||||
async def test_cpu_returns_snapshot(_mock_subscribe_once):
|
||||
_mock_subscribe_once.return_value = {"systemMetricsCpu": {"percentTotal": 23.5, "cpus": []}}
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
result = await tool_fn(action="cpu")
|
||||
result = await _make_tool()(action="live", subaction="cpu")
|
||||
assert result["success"] is True
|
||||
assert result["data"]["systemMetricsCpu"]["percentTotal"] == 23.5
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_memory_returns_snapshot(mcp, _mock_subscribe_once):
|
||||
async def test_memory_returns_snapshot(_mock_subscribe_once):
|
||||
_mock_subscribe_once.return_value = {
|
||||
"systemMetricsMemory": {"total": 32000000000, "used": 10000000000, "percentTotal": 31.2}
|
||||
}
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
result = await tool_fn(action="memory")
|
||||
result = await _make_tool()(action="live", subaction="memory")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_requires_path(mcp, _mock_subscribe_collect):
|
||||
async def test_log_tail_requires_path(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = []
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="path"):
|
||||
await tool_fn(action="log_tail")
|
||||
await _make_tool()(action="live", subaction="log_tail")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_with_path(mcp, _mock_subscribe_collect):
|
||||
async def test_log_tail_with_path(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = [
|
||||
{"logFile": {"path": "/var/log/syslog", "content": "line1\nline2", "totalLines": 2}}
|
||||
]
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
result = await tool_fn(action="log_tail", path="/var/log/syslog", collect_for=1.0)
|
||||
result = await _make_tool()(
|
||||
action="live", subaction="log_tail", path="/var/log/syslog", collect_for=1.0
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["event_count"] == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_notification_feed_collects_events(mcp, _mock_subscribe_collect):
|
||||
async def test_notification_feed_collects_events(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = [
|
||||
{"notificationAdded": {"id": "1", "title": "Alert"}},
|
||||
{"notificationAdded": {"id": "2", "title": "Info"}},
|
||||
]
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
result = await tool_fn(action="notification_feed", collect_for=2.0)
|
||||
result = await _make_tool()(action="live", subaction="notification_feed", collect_for=2.0)
|
||||
assert result["event_count"] == 2
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_invalid_action_raises(mcp):
|
||||
async def test_invalid_subaction_raises():
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="nonexistent") # type: ignore[arg-type]
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await _make_tool()(action="live", subaction="nonexistent")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_snapshot_propagates_tool_error(mcp, _mock_subscribe_once):
|
||||
async def test_snapshot_propagates_tool_error(_mock_subscribe_once):
|
||||
"""Non-event-driven (streaming) actions still propagate timeout as ToolError."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription timed out after 10s")
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="cpu")
|
||||
await _make_tool()(action="live", subaction="cpu")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_rejects_invalid_path(mcp, _mock_subscribe_collect):
|
||||
async def test_event_driven_timeout_returns_no_recent_events(_mock_subscribe_once):
|
||||
"""Event-driven subscriptions return a graceful no_recent_events response on timeout."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
with pytest.raises(ToolError, match="must start with"):
|
||||
await tool_fn(action="log_tail", path="/etc/shadow")
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription timed out after 10s")
|
||||
result = await _make_tool()(action="live", subaction="notifications_overview")
|
||||
assert result["success"] is True
|
||||
assert result["status"] == "no_recent_events"
|
||||
assert "No events received" in result["message"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_snapshot_wraps_bare_exception(mcp, _mock_subscribe_once):
|
||||
async def test_event_driven_non_timeout_error_propagates(_mock_subscribe_once):
|
||||
"""Non-timeout ToolErrors from event-driven subscriptions still propagate."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription auth failed")
|
||||
with pytest.raises(ToolError, match="auth failed"):
|
||||
await _make_tool()(action="live", subaction="owner")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_rejects_invalid_path(_mock_subscribe_collect):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="must start with"):
|
||||
await _make_tool()(action="live", subaction="log_tail", path="/etc/shadow")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_snapshot_wraps_bare_exception(_mock_subscribe_once):
|
||||
"""Bare exceptions from subscribe_once are wrapped in ToolError by tool_error_handler."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = RuntimeError("WebSocket connection refused")
|
||||
tool_fn = _make_live_tool(mcp)
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action="cpu")
|
||||
await _make_tool()(action="live", subaction="cpu")
|
||||
|
||||
@@ -1,67 +1,54 @@
|
||||
"""Tests for unraid_notifications tool."""
|
||||
"""Tests for notification subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import get_args
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.notifications import NOTIFICATION_ACTIONS
|
||||
|
||||
|
||||
def test_warnings_action_removed() -> None:
|
||||
assert "warnings" not in get_args(NOTIFICATION_ACTIONS), (
|
||||
"warnings action references warningsAndAlerts which is not in live API"
|
||||
)
|
||||
|
||||
|
||||
def test_create_unique_action_removed() -> None:
|
||||
assert "create_unique" not in get_args(NOTIFICATION_ACTIONS), (
|
||||
"create_unique references notifyIfUnique which is not in live API"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
)
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestNotificationsValidation:
|
||||
async def test_delete_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete", notification_id="n:1", notification_type="UNREAD")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n:1",
|
||||
notification_type="UNREAD",
|
||||
)
|
||||
|
||||
async def test_delete_archived_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete_archived")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="notification", subaction="delete_archived")
|
||||
|
||||
async def test_create_requires_fields(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires title"):
|
||||
await tool_fn(action="create")
|
||||
await tool_fn(action="notification", subaction="create")
|
||||
|
||||
async def test_archive_requires_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_id"):
|
||||
await tool_fn(action="archive")
|
||||
await tool_fn(action="notification", subaction="archive")
|
||||
|
||||
async def test_delete_requires_id_and_type(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires notification_id"):
|
||||
await tool_fn(action="delete", confirm=True)
|
||||
await tool_fn(action="notification", subaction="delete", confirm=True)
|
||||
|
||||
|
||||
class TestNotificationsActions:
|
||||
@@ -75,7 +62,7 @@ class TestNotificationsActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="overview")
|
||||
result = await tool_fn(action="notification", subaction="overview")
|
||||
assert result["unread"]["total"] == 7
|
||||
|
||||
async def test_list(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -83,7 +70,7 @@ class TestNotificationsActions:
|
||||
"notifications": {"list": [{"id": "n:1", "title": "Test", "importance": "INFO"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="notification", subaction="list")
|
||||
assert len(result["notifications"]) == 1
|
||||
|
||||
async def test_create(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -92,7 +79,8 @@ class TestNotificationsActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test",
|
||||
subject="Test Subject",
|
||||
description="Test Desc",
|
||||
@@ -103,7 +91,7 @@ class TestNotificationsActions:
|
||||
async def test_archive_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"archiveNotification": {"id": "n:1"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="archive", notification_id="n:1")
|
||||
result = await tool_fn(action="notification", subaction="archive", notification_id="n:1")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -115,7 +103,8 @@ class TestNotificationsActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n:1",
|
||||
notification_type="unread",
|
||||
confirm=True,
|
||||
@@ -130,22 +119,26 @@ class TestNotificationsActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="archive_all")
|
||||
result = await tool_fn(action="notification", subaction="archive_all")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unread_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_mark_unread_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"unreadNotification": {"id": "n:1"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="unread", notification_id="n:1")
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="mark_unread", notification_id="n:1"
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "unread"
|
||||
assert result["subaction"] == "mark_unread"
|
||||
|
||||
async def test_list_with_importance_filter(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"notifications": {"list": [{"id": "n:1", "title": "Alert", "importance": "WARNING"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list", importance="warning", limit=10, offset=5)
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="list", importance="warning", limit=10, offset=5
|
||||
)
|
||||
assert len(result["notifications"]) == 1
|
||||
call_args = _mock_graphql.call_args
|
||||
filter_var = call_args[0][1]["filter"]
|
||||
@@ -161,15 +154,15 @@ class TestNotificationsActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete_archived", confirm=True)
|
||||
result = await tool_fn(action="notification", subaction="delete_archived", confirm=True)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "delete_archived"
|
||||
assert result["subaction"] == "delete_archived"
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("boom")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to execute notifications/overview"):
|
||||
await tool_fn(action="overview")
|
||||
with pytest.raises(ToolError, match="Failed to execute notification/overview"):
|
||||
await tool_fn(action="notification", subaction="overview")
|
||||
|
||||
|
||||
class TestNotificationsCreateValidation:
|
||||
@@ -179,7 +172,8 @@ class TestNotificationsCreateValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid importance"):
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
@@ -191,7 +185,8 @@ class TestNotificationsCreateValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid importance"):
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
@@ -202,7 +197,12 @@ class TestNotificationsCreateValidation:
|
||||
_mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "ALERT"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create", title="T", subject="S", description="D", importance="alert"
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="alert",
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -210,7 +210,8 @@ class TestNotificationsCreateValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="title must be at most 200"):
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="x" * 201,
|
||||
subject="S",
|
||||
description="D",
|
||||
@@ -221,7 +222,8 @@ class TestNotificationsCreateValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="subject must be at most 500"):
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="x" * 501,
|
||||
description="D",
|
||||
@@ -232,7 +234,8 @@ class TestNotificationsCreateValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="description must be at most 2000"):
|
||||
await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="x" * 2001,
|
||||
@@ -243,7 +246,8 @@ class TestNotificationsCreateValidation:
|
||||
_mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "INFO"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="x" * 200,
|
||||
subject="S",
|
||||
description="D",
|
||||
@@ -261,7 +265,9 @@ class TestNewNotificationMutations:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="archive_many", notification_ids=["n:1", "n:2"])
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="archive_many", notification_ids=["n:1", "n:2"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"ids": ["n:1", "n:2"]}
|
||||
@@ -269,7 +275,7 @@ class TestNewNotificationMutations:
|
||||
async def test_archive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_ids"):
|
||||
await tool_fn(action="archive_many")
|
||||
await tool_fn(action="notification", subaction="archive_many")
|
||||
|
||||
async def test_unarchive_many_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
@@ -279,13 +285,15 @@ class TestNewNotificationMutations:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="unarchive_many", notification_ids=["n:1", "n:2"])
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="unarchive_many", notification_ids=["n:1", "n:2"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unarchive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_ids"):
|
||||
await tool_fn(action="unarchive_many")
|
||||
await tool_fn(action="notification", subaction="unarchive_many")
|
||||
|
||||
async def test_unarchive_all_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
@@ -295,7 +303,7 @@ class TestNewNotificationMutations:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="unarchive_all")
|
||||
result = await tool_fn(action="notification", subaction="unarchive_all")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unarchive_all_with_importance(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -304,7 +312,7 @@ class TestNewNotificationMutations:
|
||||
"unarchiveAll": {"unread": {"total": 1}, "archive": {"total": 0}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
await tool_fn(action="unarchive_all", importance="warning")
|
||||
await tool_fn(action="notification", subaction="unarchive_all", importance="warning")
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"importance": "WARNING"}
|
||||
|
||||
@@ -316,5 +324,5 @@ class TestNewNotificationMutations:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="recalculate")
|
||||
result = await tool_fn(action="notification", subaction="recalculate")
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# tests/test_oidc.py
|
||||
"""Tests for unraid_oidc tool."""
|
||||
"""Tests for oidc subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -11,16 +11,12 @@ from conftest import make_tool_fn
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.oidc.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.oidc",
|
||||
"register_oidc_tool",
|
||||
"unraid_oidc",
|
||||
)
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -30,15 +26,16 @@ async def test_providers_returns_list(_mock_graphql):
|
||||
{"id": "1:local", "name": "Google", "clientId": "abc", "scopes": ["openid"]}
|
||||
]
|
||||
}
|
||||
result = await _make_tool()(action="providers")
|
||||
assert result["success"] is True
|
||||
result = await _make_tool()(action="oidc", subaction="providers")
|
||||
assert "providers" in result
|
||||
assert len(result["providers"]) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_public_providers(_mock_graphql):
|
||||
_mock_graphql.return_value = {"publicOidcProviders": []}
|
||||
result = await _make_tool()(action="public_providers")
|
||||
assert result["success"] is True
|
||||
result = await _make_tool()(action="oidc", subaction="public_providers")
|
||||
assert result["providers"] == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -46,7 +43,7 @@ async def test_provider_requires_provider_id(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="provider_id"):
|
||||
await _make_tool()(action="provider")
|
||||
await _make_tool()(action="oidc", subaction="provider")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -54,7 +51,7 @@ async def test_validate_session_requires_token(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="token"):
|
||||
await _make_tool()(action="validate_session")
|
||||
await _make_tool()(action="oidc", subaction="validate_session")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -62,5 +59,6 @@ async def test_configuration(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"oidcConfiguration": {"providers": [], "defaultAllowedOrigins": []}
|
||||
}
|
||||
result = await _make_tool()(action="configuration")
|
||||
assert result["success"] is True
|
||||
result = await _make_tool()(action="oidc", subaction="configuration")
|
||||
assert result["providers"] == []
|
||||
assert result["defaultAllowedOrigins"] == []
|
||||
|
||||
@@ -1,72 +1,63 @@
|
||||
# tests/test_plugins.py
|
||||
"""Tests for unraid_plugins tool."""
|
||||
"""Tests for plugin subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mcp():
|
||||
return FastMCP("test")
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.plugins.make_graphql_request") as m:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request") as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool(mcp):
|
||||
from unraid_mcp.tools.plugins import register_plugins_tool
|
||||
|
||||
register_plugins_tool(mcp)
|
||||
# FastMCP 3.x: access tool fn via internal provider components (same as conftest.make_tool_fn)
|
||||
local_provider = mcp.providers[0]
|
||||
tool = local_provider._components["tool:unraid_plugins@"]
|
||||
return tool.fn
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_returns_plugins(mcp, _mock_graphql):
|
||||
async def test_list_returns_plugins(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"plugins": [
|
||||
{"name": "my-plugin", "version": "1.0.0", "hasApiModule": True, "hasCliModule": False}
|
||||
]
|
||||
}
|
||||
result = await _make_tool(mcp)(action="list")
|
||||
result = await _make_tool()(action="plugin", subaction="list")
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]["plugins"]) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_requires_names(mcp, _mock_graphql):
|
||||
async def test_add_requires_names(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="names"):
|
||||
await _make_tool(mcp)(action="add")
|
||||
await _make_tool()(action="plugin", subaction="add")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_success(mcp, _mock_graphql):
|
||||
async def test_add_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"addPlugin": False} # False = auto-restart triggered
|
||||
result = await _make_tool(mcp)(action="add", names=["my-plugin"])
|
||||
result = await _make_tool()(action="plugin", subaction="add", names=["my-plugin"])
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_requires_confirm(mcp, _mock_graphql):
|
||||
async def test_remove_requires_confirm(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool(mcp)(action="remove", names=["my-plugin"], confirm=False)
|
||||
await _make_tool()(action="plugin", subaction="remove", names=["my-plugin"], confirm=False)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_with_confirm(mcp, _mock_graphql):
|
||||
async def test_remove_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"removePlugin": True}
|
||||
result = await _make_tool(mcp)(action="remove", names=["my-plugin"], confirm=True)
|
||||
result = await _make_tool()(
|
||||
action="plugin", subaction="remove", names=["my-plugin"], confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_rclone tool."""
|
||||
"""Tests for rclone subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,36 +11,36 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.rclone.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestRcloneValidation:
|
||||
async def test_delete_requires_confirm(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete_remote", name="gdrive")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="rclone", subaction="delete_remote", name="gdrive")
|
||||
|
||||
async def test_create_requires_fields(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires name"):
|
||||
await tool_fn(action="create_remote")
|
||||
await tool_fn(action="rclone", subaction="create_remote")
|
||||
|
||||
async def test_delete_requires_name(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="name is required"):
|
||||
await tool_fn(action="delete_remote", confirm=True)
|
||||
await tool_fn(action="rclone", subaction="delete_remote", confirm=True)
|
||||
|
||||
|
||||
class TestRcloneActions:
|
||||
async def test_list_remotes(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"remotes": [{"name": "gdrive", "type": "drive"}]}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list_remotes")
|
||||
result = await tool_fn(action="rclone", subaction="list_remotes")
|
||||
assert len(result["remotes"]) == 1
|
||||
|
||||
async def test_config_form(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -48,7 +48,7 @@ class TestRcloneActions:
|
||||
"rclone": {"configForm": {"id": "form:1", "dataSchema": {}, "uiSchema": {}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="config_form")
|
||||
result = await tool_fn(action="rclone", subaction="config_form")
|
||||
assert result["id"] == "form:1"
|
||||
|
||||
async def test_config_form_with_provider(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -56,7 +56,7 @@ class TestRcloneActions:
|
||||
"rclone": {"configForm": {"id": "form:s3", "dataSchema": {}, "uiSchema": {}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="config_form", provider_type="s3")
|
||||
result = await tool_fn(action="rclone", subaction="config_form", provider_type="s3")
|
||||
assert result["id"] == "form:s3"
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"formOptions": {"providerType": "s3"}}
|
||||
@@ -67,7 +67,8 @@ class TestRcloneActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="newremote",
|
||||
provider_type="s3",
|
||||
config_data={"bucket": "mybucket"},
|
||||
@@ -81,7 +82,8 @@ class TestRcloneActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="ftp-remote",
|
||||
provider_type="ftp",
|
||||
config_data={},
|
||||
@@ -91,14 +93,16 @@ class TestRcloneActions:
|
||||
async def test_delete_remote(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete_remote", name="gdrive", confirm=True)
|
||||
result = await tool_fn(
|
||||
action="rclone", subaction="delete_remote", name="gdrive", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete_remote_failure(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": False}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to delete"):
|
||||
await tool_fn(action="delete_remote", name="gdrive", confirm=True)
|
||||
await tool_fn(action="rclone", subaction="delete_remote", name="gdrive", confirm=True)
|
||||
|
||||
|
||||
class TestRcloneConfigDataValidation:
|
||||
@@ -108,7 +112,8 @@ class TestRcloneConfigDataValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disallowed characters"):
|
||||
await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"../evil": "value"},
|
||||
@@ -118,7 +123,8 @@ class TestRcloneConfigDataValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disallowed characters"):
|
||||
await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"key;rm": "value"},
|
||||
@@ -128,7 +134,8 @@ class TestRcloneConfigDataValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="max 50"):
|
||||
await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={f"key{i}": "v" for i in range(51)},
|
||||
@@ -138,7 +145,8 @@ class TestRcloneConfigDataValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="string, number, or boolean"):
|
||||
await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"nested": {"key": "val"}},
|
||||
@@ -148,19 +156,19 @@ class TestRcloneConfigDataValidation:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="exceeds max length"):
|
||||
await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"key": "x" * 4097},
|
||||
)
|
||||
|
||||
async def test_boolean_value_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"rclone": {"createRCloneRemote": {"name": "r", "type": "s3"}}
|
||||
}
|
||||
_mock_graphql.return_value = {"rclone": {"createRCloneRemote": {"name": "r", "type": "s3"}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"use_path_style": True},
|
||||
@@ -173,7 +181,8 @@ class TestRcloneConfigDataValidation:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="sftp",
|
||||
config_data={"port": 22},
|
||||
|
||||
@@ -6,6 +6,7 @@ from unittest.mock import AsyncMock, patch
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from unraid_mcp.subscriptions.queries import SNAPSHOT_ACTIONS
|
||||
from unraid_mcp.subscriptions.resources import register_subscription_resources
|
||||
|
||||
|
||||
@@ -16,15 +17,6 @@ def _make_resources():
|
||||
return test_mcp
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_subscribe_once():
|
||||
with patch(
|
||||
"unraid_mcp.subscriptions.resources.subscribe_once",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_ensure_started():
|
||||
with patch(
|
||||
@@ -34,64 +26,144 @@ def _mock_ensure_started():
|
||||
yield mock
|
||||
|
||||
|
||||
class TestLiveResources:
|
||||
@pytest.mark.parametrize(
|
||||
"action",
|
||||
[
|
||||
"cpu",
|
||||
"memory",
|
||||
"cpu_telemetry",
|
||||
"array_state",
|
||||
"parity_progress",
|
||||
"ups_status",
|
||||
"notifications_overview",
|
||||
"owner",
|
||||
"server_status",
|
||||
],
|
||||
class TestLiveResourcesUseManagerCache:
|
||||
"""All live resources must read from the persistent SubscriptionManager cache."""
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_cached_data(self, action: str) -> None:
|
||||
cached = {"systemMetricsCpu": {"percentTotal": 12.5}}
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=cached)
|
||||
mcp = _make_resources()
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == cached
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_connecting_when_no_cache_and_no_error(
|
||||
self, action: str
|
||||
) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mcp = _make_resources()
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert parsed["status"] == "connecting"
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_error_status_on_permanent_failure(self, action: str) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {action: "WebSocket auth failed"}
|
||||
mcp = _make_resources()
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert parsed["status"] == "error"
|
||||
assert "auth failed" in parsed["message"]
|
||||
|
||||
|
||||
class TestSnapshotSubscriptionsRegistered:
|
||||
"""All SNAPSHOT_ACTIONS must be registered in the SubscriptionManager with auto_start=True."""
|
||||
|
||||
def test_all_snapshot_actions_in_configs(self) -> None:
|
||||
from unraid_mcp.subscriptions.manager import subscription_manager
|
||||
|
||||
for action in SNAPSHOT_ACTIONS:
|
||||
assert action in subscription_manager.subscription_configs, (
|
||||
f"'{action}' not registered in subscription_configs"
|
||||
)
|
||||
async def test_resource_returns_json(
|
||||
self,
|
||||
action: str,
|
||||
_mock_subscribe_once: AsyncMock,
|
||||
_mock_ensure_started: AsyncMock,
|
||||
) -> None:
|
||||
_mock_subscribe_once.return_value = {"data": "ok"}
|
||||
mcp = _make_resources()
|
||||
|
||||
local_provider = mcp.providers[0]
|
||||
resource_key = f"resource:unraid://live/{action}@"
|
||||
resource = local_provider._components[resource_key]
|
||||
result = await resource.fn()
|
||||
def test_all_snapshot_actions_autostart(self) -> None:
|
||||
from unraid_mcp.subscriptions.manager import subscription_manager
|
||||
|
||||
parsed = json.loads(result)
|
||||
assert parsed == {"data": "ok"}
|
||||
|
||||
async def test_resource_returns_error_dict_on_failure(
|
||||
self,
|
||||
_mock_subscribe_once: AsyncMock,
|
||||
_mock_ensure_started: AsyncMock,
|
||||
) -> None:
|
||||
from fastmcp.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("WebSocket timeout")
|
||||
mcp = _make_resources()
|
||||
|
||||
local_provider = mcp.providers[0]
|
||||
resource = local_provider._components["resource:unraid://live/cpu@"]
|
||||
result = await resource.fn()
|
||||
|
||||
parsed = json.loads(result)
|
||||
assert "error" in parsed
|
||||
assert "WebSocket timeout" in parsed["error"]
|
||||
for action in SNAPSHOT_ACTIONS:
|
||||
config = subscription_manager.subscription_configs[action]
|
||||
assert config.get("auto_start") is True, (
|
||||
f"'{action}' missing auto_start=True in subscription_configs"
|
||||
)
|
||||
|
||||
|
||||
class TestLogsStreamResource:
|
||||
async def test_logs_stream_no_data(self, _mock_ensure_started: AsyncMock) -> None:
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_logs_stream_no_data(self) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mcp = _make_resources()
|
||||
local_provider = mcp.providers[0]
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = local_provider._components["resource:unraid://logs/stream@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert "status" in parsed
|
||||
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_logs_stream_returns_data_with_empty_dict(self) -> None:
|
||||
"""Empty dict cache hit must return data, not 'connecting' status."""
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value={})
|
||||
mcp = _make_resources()
|
||||
local_provider = mcp.providers[0]
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = local_provider._components["resource:unraid://logs/stream@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == {}
|
||||
|
||||
|
||||
class TestAutoStartDisabledFallback:
|
||||
"""When auto_start is disabled, resources fall back to on-demand subscribe_once."""
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_fallback_returns_subscribe_once_data(self, action: str) -> None:
|
||||
fallback_data = {"systemMetricsCpu": {"percentTotal": 42.0}}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr,
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.resources.subscribe_once",
|
||||
new=AsyncMock(return_value=fallback_data),
|
||||
),
|
||||
):
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mock_mgr.auto_start_enabled = False
|
||||
mcp = _make_resources()
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == fallback_data
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_fallback_failure_returns_connecting(self, action: str) -> None:
|
||||
"""When on-demand fallback itself fails, still return 'connecting' status."""
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr,
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.resources.subscribe_once",
|
||||
new=AsyncMock(side_effect=Exception("WebSocket failed")),
|
||||
),
|
||||
):
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mock_mgr.auto_start_enabled = False
|
||||
mcp = _make_resources()
|
||||
# Accessing FastMCP internals intentionally for unit test isolation.
|
||||
# This may break on FastMCP upgrades — consider a make_resource_fn() helper if it does.
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result)["status"] == "connecting"
|
||||
|
||||
@@ -1,38 +1,32 @@
|
||||
"""Tests for the unraid_settings tool."""
|
||||
"""Tests for the setting subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import get_args
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.settings import SETTINGS_ACTIONS, register_settings_tool
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.settings.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool() -> AsyncMock:
|
||||
test_mcp = FastMCP("test")
|
||||
register_settings_tool(test_mcp)
|
||||
# FastMCP 3.x stores tools in providers[0]._components keyed as "tool:{name}@"
|
||||
local_provider = test_mcp.providers[0]
|
||||
tool = local_provider._components["tool:unraid_settings@"] # ty: ignore[unresolved-attribute]
|
||||
return tool.fn
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regression: removed actions must not appear in SETTINGS_ACTIONS
|
||||
# Regression: removed subactions must raise Invalid subaction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize(
|
||||
"action",
|
||||
"subaction",
|
||||
[
|
||||
"update_temperature",
|
||||
"update_time",
|
||||
@@ -44,10 +38,10 @@ def _make_tool() -> AsyncMock:
|
||||
"update_ssh",
|
||||
],
|
||||
)
|
||||
def test_removed_settings_actions_are_gone(action: str) -> None:
|
||||
assert action not in get_args(SETTINGS_ACTIONS), (
|
||||
f"{action} references a non-existent mutation and must not be in SETTINGS_ACTIONS"
|
||||
)
|
||||
async def test_removed_settings_subactions_are_invalid(subaction: str) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="setting", subaction=subaction)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -56,19 +50,19 @@ def test_removed_settings_actions_are_gone(action: str) -> None:
|
||||
|
||||
|
||||
class TestSettingsValidation:
|
||||
"""Tests for action validation and destructive guard."""
|
||||
"""Tests for subaction validation and destructive guard."""
|
||||
|
||||
async def test_invalid_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_invalid_subaction(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="nonexistent_action")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="setting", subaction="nonexistent_action")
|
||||
|
||||
async def test_destructive_configure_ups_requires_confirm(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action="configure_ups", ups_config={"mode": "slave"})
|
||||
await tool_fn(action="setting", subaction="configure_ups", ups_config={"mode": "slave"})
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -77,21 +71,23 @@ class TestSettingsValidation:
|
||||
|
||||
|
||||
class TestSettingsUpdate:
|
||||
"""Tests for update action."""
|
||||
"""Tests for update subaction."""
|
||||
|
||||
async def test_update_requires_settings_input(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="settings_input is required"):
|
||||
await tool_fn(action="update")
|
||||
await tool_fn(action="setting", subaction="update")
|
||||
|
||||
async def test_update_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"updateSettings": {"restartRequired": False, "values": {}, "warnings": []}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="update", settings_input={"shareCount": 5})
|
||||
result = await tool_fn(
|
||||
action="setting", subaction="update", settings_input={"shareCount": 5}
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "update"
|
||||
assert result["subaction"] == "update"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -100,18 +96,21 @@ class TestSettingsUpdate:
|
||||
|
||||
|
||||
class TestUpsConfig:
|
||||
"""Tests for configure_ups action."""
|
||||
"""Tests for configure_ups subaction."""
|
||||
|
||||
async def test_configure_ups_requires_ups_config(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="ups_config is required"):
|
||||
await tool_fn(action="configure_ups", confirm=True)
|
||||
await tool_fn(action="setting", subaction="configure_ups", confirm=True)
|
||||
|
||||
async def test_configure_ups_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"configureUps": True}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="configure_ups", confirm=True, ups_config={"mode": "master", "cable": "usb"}
|
||||
action="setting",
|
||||
subaction="configure_ups",
|
||||
confirm=True,
|
||||
ups_config={"mode": "master", "cable": "usb"},
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "configure_ups"
|
||||
assert result["subaction"] == "configure_ups"
|
||||
|
||||
@@ -387,6 +387,123 @@ def test_tool_error_handler_credentials_error_message_includes_path():
|
||||
assert "setup" in str(exc_info.value).lower()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# elicit_reset_confirmation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_false_when_ctx_none():
|
||||
"""Returns False immediately when no MCP context is available."""
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
result = await elicit_reset_confirmation(None, "https://example.com")
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_true_when_user_confirms():
|
||||
"""Returns True when the user accepts and answers True."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "accept"
|
||||
mock_result.data = True
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_reset_confirmation(mock_ctx, "https://example.com")
|
||||
assert result is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_false_when_user_answers_false():
|
||||
"""Returns False when the user accepts but answers False (does not want to reset)."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "accept"
|
||||
mock_result.data = False
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_reset_confirmation(mock_ctx, "https://example.com")
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_false_when_declined():
|
||||
"""Returns False when the user declines via action (dismisses the prompt)."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "decline"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_reset_confirmation(mock_ctx, "https://example.com")
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_false_when_cancelled():
|
||||
"""Returns False when the user cancels the prompt."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "cancel"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_reset_confirmation(mock_ctx, "https://example.com")
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_returns_true_when_not_implemented():
|
||||
"""Returns True (proceed with reset) when the MCP client does not support elicitation.
|
||||
|
||||
Non-interactive clients (stdio, CI) must not be permanently blocked from
|
||||
reconfiguring credentials just because they can't ask the user a yes/no question.
|
||||
"""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_ctx.elicit = AsyncMock(side_effect=NotImplementedError("elicitation not supported"))
|
||||
|
||||
result = await elicit_reset_confirmation(mock_ctx, "https://example.com")
|
||||
assert result is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_reset_confirmation_includes_current_url_in_prompt():
|
||||
"""The elicitation message includes the current URL so the user knows what they're replacing."""
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_reset_confirmation
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "decline"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
await elicit_reset_confirmation(mock_ctx, "https://my-unraid.example.com:31337")
|
||||
|
||||
call_kwargs = mock_ctx.elicit.call_args
|
||||
message = call_kwargs.kwargs.get("message") or call_kwargs.args[0]
|
||||
assert "https://my-unraid.example.com:31337" in message
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_credentials_not_configured_surfaces_as_tool_error_with_path():
|
||||
"""CredentialsNotConfiguredError from a tool becomes ToolError with the credentials path."""
|
||||
@@ -396,15 +513,15 @@ async def test_credentials_not_configured_surfaces_as_tool_error_with_path():
|
||||
from unraid_mcp.config.settings import CREDENTIALS_ENV_PATH
|
||||
from unraid_mcp.core.exceptions import CredentialsNotConfiguredError, ToolError
|
||||
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users")
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
with (
|
||||
patch(
|
||||
"unraid_mcp.tools.users.make_graphql_request",
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(side_effect=CredentialsNotConfiguredError()),
|
||||
),
|
||||
pytest.raises(ToolError) as exc_info,
|
||||
):
|
||||
await tool_fn(action="me")
|
||||
await tool_fn(action="user", subaction="me")
|
||||
|
||||
assert str(CREDENTIALS_ENV_PATH) in str(exc_info.value)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Tests for unraid_storage tool."""
|
||||
"""Tests for disk subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import get_args
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
@@ -9,13 +8,6 @@ from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.core.utils import format_bytes, format_kb, safe_get
|
||||
from unraid_mcp.tools.storage import STORAGE_ACTIONS
|
||||
|
||||
|
||||
def test_unassigned_action_removed() -> None:
|
||||
assert "unassigned" not in get_args(STORAGE_ACTIONS), (
|
||||
"unassigned action references unassignedDevices which is not in live API"
|
||||
)
|
||||
|
||||
|
||||
# --- Unit tests for helpers ---
|
||||
@@ -46,59 +38,63 @@ class TestFormatBytes:
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestStorageValidation:
|
||||
async def test_disk_details_requires_disk_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await tool_fn(action="disk_details")
|
||||
await tool_fn(action="disk", subaction="disk_details")
|
||||
|
||||
async def test_logs_requires_log_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="log_path"):
|
||||
await tool_fn(action="logs")
|
||||
await tool_fn(action="disk", subaction="logs")
|
||||
|
||||
async def test_logs_rejects_invalid_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/etc/shadow")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/etc/shadow")
|
||||
|
||||
async def test_logs_rejects_path_traversal(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
# Traversal that escapes /var/log/ to reach /etc/shadow
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/var/log/../../etc/shadow")
|
||||
# Traversal that escapes /mnt/ to reach /etc/passwd
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/mnt/../etc/passwd")
|
||||
# Traversal that escapes /var/log/ — detected by early .. check
|
||||
with pytest.raises(ToolError, match="log_path"):
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/../../etc/shadow")
|
||||
# Traversal via .. — detected by early .. check
|
||||
with pytest.raises(ToolError, match="log_path"):
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/../etc/passwd")
|
||||
|
||||
async def test_logs_allows_valid_paths(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result["content"] == "ok"
|
||||
|
||||
async def test_logs_tail_lines_too_large(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="tail_lines must be between"):
|
||||
await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_001)
|
||||
await tool_fn(
|
||||
action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=10_001
|
||||
)
|
||||
|
||||
async def test_logs_tail_lines_zero_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="tail_lines must be between"):
|
||||
await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=0)
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=0)
|
||||
|
||||
async def test_logs_tail_lines_at_max_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_000)
|
||||
result = await tool_fn(
|
||||
action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=10_000
|
||||
)
|
||||
assert result["content"] == "ok"
|
||||
|
||||
async def test_non_logs_action_ignores_tail_lines_validation(
|
||||
@@ -106,7 +102,7 @@ class TestStorageValidation:
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"shares": []}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="shares", tail_lines=0)
|
||||
result = await tool_fn(action="disk", subaction="shares", tail_lines=0)
|
||||
assert result["shares"] == []
|
||||
|
||||
|
||||
@@ -173,13 +169,13 @@ class TestStorageActions:
|
||||
"shares": [{"id": "s:1", "name": "media"}, {"id": "s:2", "name": "backups"}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="shares")
|
||||
result = await tool_fn(action="disk", subaction="shares")
|
||||
assert len(result["shares"]) == 2
|
||||
|
||||
async def test_disks(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"disks": [{"id": "d:1", "device": "sda"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disks")
|
||||
result = await tool_fn(action="disk", subaction="disks")
|
||||
assert len(result["disks"]) == 1
|
||||
|
||||
async def test_disk_details(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -194,7 +190,7 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "35\u00b0C"
|
||||
assert "1.00 GB" in result["summary"]["size_formatted"]
|
||||
|
||||
@@ -211,7 +207,7 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "0\u00b0C"
|
||||
|
||||
async def test_disk_details_temperature_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -227,26 +223,26 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "N/A"
|
||||
|
||||
async def test_logs_null_log_file(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""logFile being null should return an empty dict."""
|
||||
_mock_graphql.return_value = {"logFile": None}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result == {}
|
||||
|
||||
async def test_disk_details_not_found(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"disk": None}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="not found"):
|
||||
await tool_fn(action="disk_details", disk_id="d:missing")
|
||||
await tool_fn(action="disk", subaction="disk_details", disk_id="d:missing")
|
||||
|
||||
async def test_log_files(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFiles": [{"name": "syslog", "path": "/var/log/syslog"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="log_files")
|
||||
result = await tool_fn(action="disk", subaction="log_files")
|
||||
assert len(result["log_files"]) == 1
|
||||
|
||||
async def test_logs(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -254,7 +250,7 @@ class TestStorageActions:
|
||||
"logFile": {"path": "/var/log/syslog", "content": "log line", "totalLines": 1}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result["content"] == "log line"
|
||||
|
||||
|
||||
@@ -268,7 +264,7 @@ class TestStorageNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
|
||||
async def test_shares_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Connection refused when listing shares should propagate as ToolError."""
|
||||
@@ -277,44 +273,55 @@ class TestStorageNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="shares")
|
||||
await tool_fn(action="disk", subaction="shares")
|
||||
|
||||
async def test_disks_http_500(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""HTTP 500 when listing disks should propagate as ToolError."""
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="HTTP error 500"):
|
||||
await tool_fn(action="disks")
|
||||
await tool_fn(action="disk", subaction="disks")
|
||||
|
||||
|
||||
class TestStorageFlashBackup:
|
||||
async def test_flash_backup_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(
|
||||
action="flash_backup", remote_name="r", source_path="/boot", destination_path="r:b"
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
destination_path="r:b",
|
||||
)
|
||||
|
||||
async def test_flash_backup_requires_remote_name(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="remote_name"):
|
||||
await tool_fn(action="flash_backup", confirm=True)
|
||||
await tool_fn(action="disk", subaction="flash_backup", confirm=True)
|
||||
|
||||
async def test_flash_backup_requires_source_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="source_path"):
|
||||
await tool_fn(action="flash_backup", confirm=True, remote_name="r")
|
||||
await tool_fn(action="disk", subaction="flash_backup", confirm=True, remote_name="r")
|
||||
|
||||
async def test_flash_backup_requires_destination_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destination_path"):
|
||||
await tool_fn(action="flash_backup", confirm=True, remote_name="r", source_path="/boot")
|
||||
await tool_fn(
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
confirm=True,
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
)
|
||||
|
||||
async def test_flash_backup_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:1"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="flash_backup",
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
confirm=True,
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
@@ -327,7 +334,8 @@ class TestStorageFlashBackup:
|
||||
_mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:2"}}
|
||||
tool_fn = _make_tool()
|
||||
await tool_fn(
|
||||
action="flash_backup",
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
confirm=True,
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_users tool.
|
||||
"""Tests for user subactions of the consolidated unraid tool.
|
||||
|
||||
NOTE: Unraid GraphQL API only supports the me() query.
|
||||
User management operations (list, add, delete, cloud, remote_access, origins) are NOT available in the API.
|
||||
@@ -15,35 +15,35 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.users.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestUsersValidation:
|
||||
"""Test validation for invalid actions."""
|
||||
"""Test validation for invalid subactions."""
|
||||
|
||||
async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that non-existent actions are rejected with clear error."""
|
||||
async def test_invalid_subaction_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that non-existent subactions are rejected with clear error."""
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="list")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="list")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="add")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="add")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="delete")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="delete")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="cloud")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="cloud")
|
||||
|
||||
|
||||
class TestUsersActions:
|
||||
"""Test the single supported action: me."""
|
||||
"""Test the single supported subaction: me."""
|
||||
|
||||
async def test_me(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test querying current authenticated user."""
|
||||
@@ -51,27 +51,18 @@ class TestUsersActions:
|
||||
"me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="me")
|
||||
result = await tool_fn(action="user", subaction="me")
|
||||
assert result["name"] == "root"
|
||||
assert result["roles"] == ["ADMIN"]
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
async def test_me_default_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that 'me' is the default action."""
|
||||
_mock_graphql.return_value = {
|
||||
"me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn()
|
||||
assert result["name"] == "root"
|
||||
|
||||
|
||||
class TestUsersNoneHandling:
|
||||
"""Verify actions return empty dict (not TypeError) when API returns None."""
|
||||
"""Verify subactions return empty dict (not TypeError) when API returns None."""
|
||||
|
||||
async def test_me_returns_none(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that me returns empty dict when API returns None."""
|
||||
_mock_graphql.return_value = {"me": None}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="me")
|
||||
result = await tool_fn(action="user", subaction="me")
|
||||
assert result == {}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_vm tool."""
|
||||
"""Tests for vm subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,34 +11,32 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestVmValidation:
|
||||
async def test_actions_except_list_require_vm_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in ("details", "start", "stop", "pause", "resume", "reboot"):
|
||||
for subaction in ("details", "start", "stop", "pause", "resume", "reboot"):
|
||||
with pytest.raises(ToolError, match="vm_id"):
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="vm", subaction=subaction)
|
||||
|
||||
async def test_destructive_actions_require_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in ("force_stop", "reset"):
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action=action, vm_id="uuid-1")
|
||||
for subaction in ("force_stop", "reset"):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="vm", subaction=subaction, vm_id="uuid-1")
|
||||
|
||||
async def test_destructive_vm_id_check_before_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Destructive actions without vm_id should fail on vm_id first (validated before confirm)."""
|
||||
"""Destructive subactions without vm_id should fail on vm_id first (validated before confirm)."""
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="vm_id"):
|
||||
await tool_fn(action="force_stop")
|
||||
await tool_fn(action="vm", subaction="force_stop")
|
||||
|
||||
|
||||
class TestVmActions:
|
||||
@@ -51,20 +49,20 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert len(result["vms"]) == 1
|
||||
assert result["vms"][0]["name"] == "Windows 11"
|
||||
|
||||
async def test_list_empty(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vms": {"domains": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert result["vms"] == []
|
||||
|
||||
async def test_list_no_vms_key(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert result["vms"] == []
|
||||
|
||||
async def test_details_by_uuid(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -74,7 +72,7 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="details", vm_id="uuid-1")
|
||||
assert result["name"] == "Win11"
|
||||
|
||||
async def test_details_by_name(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -84,7 +82,7 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", vm_id="Win11")
|
||||
result = await tool_fn(action="vm", subaction="details", vm_id="Win11")
|
||||
assert result["uuid"] == "uuid-1"
|
||||
|
||||
async def test_details_not_found(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -95,48 +93,48 @@ class TestVmActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="not found"):
|
||||
await tool_fn(action="details", vm_id="nonexistent")
|
||||
await tool_fn(action="vm", subaction="details", vm_id="nonexistent")
|
||||
|
||||
async def test_start_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "start"
|
||||
assert result["subaction"] == "start"
|
||||
|
||||
async def test_force_stop(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="force_stop", vm_id="uuid-1", confirm=True)
|
||||
result = await tool_fn(action="vm", subaction="force_stop", vm_id="uuid-1", confirm=True)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "force_stop"
|
||||
assert result["subaction"] == "force_stop"
|
||||
|
||||
async def test_stop_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"stop": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="stop", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="stop", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "stop"
|
||||
assert result["subaction"] == "stop"
|
||||
|
||||
async def test_pause_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"pause": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="pause", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="pause", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "pause"
|
||||
assert result["subaction"] == "pause"
|
||||
|
||||
async def test_resume_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"resume": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="resume", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="resume", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "resume"
|
||||
assert result["subaction"] == "resume"
|
||||
|
||||
async def test_mutation_unexpected_response(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to start"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
|
||||
class TestVmMutationFailures:
|
||||
@@ -147,38 +145,38 @@ class TestVmMutationFailures:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to start"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
async def test_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""VM start returning False should still succeed (the tool reports the raw value)."""
|
||||
_mock_graphql.return_value = {"vm": {"start": False}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
assert result["success"] is False
|
||||
assert result["action"] == "start"
|
||||
assert result["subaction"] == "start"
|
||||
|
||||
async def test_stop_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""VM stop returning None in the field should succeed (key exists, value is None)."""
|
||||
_mock_graphql.return_value = {"vm": {"stop": None}}
|
||||
tool_fn = _make_tool()
|
||||
# The check is `field in data["vm"]` — `in` checks key existence, not truthiness
|
||||
result = await tool_fn(action="stop", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="stop", vm_id="uuid-1")
|
||||
assert result["success"] is None
|
||||
assert result["action"] == "stop"
|
||||
assert result["subaction"] == "stop"
|
||||
|
||||
async def test_force_stop_mutation_empty_vm_object(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Empty vm object with no matching field should raise ToolError."""
|
||||
_mock_graphql.return_value = {"vm": {}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to force_stop"):
|
||||
await tool_fn(action="force_stop", vm_id="uuid-1", confirm=True)
|
||||
await tool_fn(action="vm", subaction="force_stop", vm_id="uuid-1", confirm=True)
|
||||
|
||||
async def test_reboot_mutation_vm_key_none(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""vm key being None should raise ToolError."""
|
||||
_mock_graphql.return_value = {"vm": None}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to reboot"):
|
||||
await tool_fn(action="reboot", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="reboot", vm_id="uuid-1")
|
||||
|
||||
async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Mid-operation timeout should be wrapped in ToolError."""
|
||||
@@ -186,4 +184,4 @@ class TestVmMutationFailures:
|
||||
_mock_graphql.side_effect = TimeoutError("VM operation timed out")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
@@ -76,6 +76,41 @@ elif raw_verify_ssl in ["true", "1", "yes"]:
|
||||
else: # Path to CA bundle
|
||||
UNRAID_VERIFY_SSL = raw_verify_ssl
|
||||
|
||||
# Google OAuth Configuration (Optional)
|
||||
# -------------------------------------
|
||||
# When set, the MCP HTTP server requires Google login before tool calls.
|
||||
# UNRAID_MCP_BASE_URL must match the public URL clients use to reach this server.
|
||||
# Google Cloud Console → Credentials → Authorized redirect URIs:
|
||||
# Add: <UNRAID_MCP_BASE_URL>/auth/callback
|
||||
GOOGLE_CLIENT_ID = os.getenv("GOOGLE_CLIENT_ID", "")
|
||||
GOOGLE_CLIENT_SECRET = os.getenv("GOOGLE_CLIENT_SECRET", "")
|
||||
UNRAID_MCP_BASE_URL = os.getenv("UNRAID_MCP_BASE_URL", "")
|
||||
|
||||
# JWT signing key for FastMCP OAuth tokens.
|
||||
# MUST be set to a stable secret so tokens survive server restarts.
|
||||
# Generate once: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
# Never change this value — all existing tokens will be invalidated.
|
||||
UNRAID_MCP_JWT_SIGNING_KEY = os.getenv("UNRAID_MCP_JWT_SIGNING_KEY", "")
|
||||
|
||||
|
||||
def is_google_auth_configured() -> bool:
|
||||
"""Return True when all required Google OAuth vars are present."""
|
||||
return bool(GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET and UNRAID_MCP_BASE_URL)
|
||||
|
||||
|
||||
# API Key Authentication (Optional)
|
||||
# ----------------------------------
|
||||
# A static bearer token clients can use instead of (or alongside) Google OAuth.
|
||||
# Can be set to the same value as UNRAID_API_KEY for simplicity, or a separate
|
||||
# dedicated secret for MCP access.
|
||||
UNRAID_MCP_API_KEY = os.getenv("UNRAID_MCP_API_KEY", "")
|
||||
|
||||
|
||||
def is_api_key_auth_configured() -> bool:
|
||||
"""Return True when UNRAID_MCP_API_KEY is set."""
|
||||
return bool(UNRAID_MCP_API_KEY)
|
||||
|
||||
|
||||
# Logging Configuration
|
||||
LOG_LEVEL_STR = os.getenv("UNRAID_MCP_LOG_LEVEL", "INFO").upper()
|
||||
LOG_FILE_NAME = os.getenv("UNRAID_MCP_LOG_FILE", "unraid-mcp.log")
|
||||
@@ -155,6 +190,10 @@ def get_config_summary() -> dict[str, Any]:
|
||||
"log_file": str(LOG_FILE_PATH),
|
||||
"config_valid": is_valid,
|
||||
"missing_config": missing if not is_valid else None,
|
||||
"google_auth_enabled": is_google_auth_configured(),
|
||||
"google_auth_base_url": UNRAID_MCP_BASE_URL if is_google_auth_configured() else None,
|
||||
"jwt_signing_key_configured": bool(UNRAID_MCP_JWT_SIGNING_KEY),
|
||||
"api_key_auth_enabled": is_api_key_auth_configured(),
|
||||
}
|
||||
|
||||
|
||||
|
||||
110
unraid_mcp/core/guards.py
Normal file
110
unraid_mcp/core/guards.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Destructive action gating via MCP elicitation.
|
||||
|
||||
Provides gate_destructive_action() — a single call to guard any destructive
|
||||
tool action with interactive user confirmation or confirm=True bypass.
|
||||
"""
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastmcp import Context
|
||||
|
||||
from ..config.logging import logger
|
||||
from .exceptions import ToolError
|
||||
|
||||
|
||||
class _ConfirmAction(BaseModel):
|
||||
confirmed: bool = Field(False, description="Check the box to confirm and proceed")
|
||||
|
||||
|
||||
async def elicit_destructive_confirmation(
|
||||
ctx: "Context | None", action: str, description: str
|
||||
) -> bool:
|
||||
"""Prompt the user to confirm a destructive action via MCP elicitation.
|
||||
|
||||
Args:
|
||||
ctx: The MCP context. If None, returns False immediately.
|
||||
action: Action name shown in the prompt.
|
||||
description: Human-readable description of what the action will do.
|
||||
|
||||
Returns:
|
||||
True if the user confirmed, False otherwise.
|
||||
"""
|
||||
if ctx is None:
|
||||
logger.warning(
|
||||
"Cannot elicit confirmation for '%s': no MCP context available. "
|
||||
"Re-run with confirm=True to bypass elicitation.",
|
||||
action,
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
result = await ctx.elicit(
|
||||
message=(
|
||||
f"**Confirm destructive action: `{action}`**\n\n"
|
||||
f"{description}\n\n"
|
||||
"Are you sure you want to proceed?"
|
||||
),
|
||||
response_type=_ConfirmAction,
|
||||
)
|
||||
except NotImplementedError:
|
||||
logger.warning(
|
||||
"MCP client does not support elicitation for action '%s'. "
|
||||
"Re-run with confirm=True to bypass.",
|
||||
action,
|
||||
)
|
||||
return False
|
||||
|
||||
if result.action != "accept":
|
||||
logger.info("Destructive action '%s' declined by user (%s).", action, result.action)
|
||||
return False
|
||||
|
||||
confirmed: bool = result.data.confirmed # type: ignore[union-attr]
|
||||
if not confirmed:
|
||||
logger.info("Destructive action '%s' not confirmed by user.", action)
|
||||
return confirmed
|
||||
|
||||
|
||||
async def gate_destructive_action(
|
||||
ctx: "Context | None",
|
||||
action: str,
|
||||
destructive_actions: set[str],
|
||||
confirm: bool,
|
||||
description: str | dict[str, str],
|
||||
) -> None:
|
||||
"""Gate a destructive action with elicitation or confirm=True bypass.
|
||||
|
||||
Does nothing if the action is not in destructive_actions or confirm=True.
|
||||
Otherwise calls elicit_destructive_confirmation; raises ToolError if the
|
||||
user declines or elicitation is unavailable.
|
||||
|
||||
Args:
|
||||
ctx: MCP context for elicitation (None skips elicitation).
|
||||
action: The action being requested.
|
||||
destructive_actions: Set of action names considered destructive.
|
||||
confirm: When True, bypasses elicitation and proceeds immediately.
|
||||
description: Human-readable description of the action's impact.
|
||||
Pass a str when one description covers all destructive actions.
|
||||
Pass a dict[action_name, description] when descriptions differ.
|
||||
"""
|
||||
if action not in destructive_actions:
|
||||
return
|
||||
|
||||
if confirm:
|
||||
logger.info("Destructive action '%s' bypassed via confirm=True.", action)
|
||||
return
|
||||
|
||||
if isinstance(description, dict):
|
||||
desc = description.get(action)
|
||||
if desc is None:
|
||||
raise ToolError(f"Missing destructive-action description for '{action}'.")
|
||||
else:
|
||||
desc = description
|
||||
confirmed = await elicit_destructive_confirmation(ctx, action, desc)
|
||||
if not confirmed:
|
||||
raise ToolError(
|
||||
f"Action '{action}' was not confirmed. Re-run with confirm=True to bypass elicitation."
|
||||
)
|
||||
@@ -29,51 +29,42 @@ class _UnraidCredentials:
|
||||
api_key: str
|
||||
|
||||
|
||||
async def elicit_destructive_confirmation(
|
||||
ctx: Context | None, action: str, description: str
|
||||
) -> bool:
|
||||
"""Prompt the user to confirm a destructive action via MCP elicitation.
|
||||
async def elicit_reset_confirmation(ctx: Context | None, current_url: str) -> bool:
|
||||
"""Ask the user whether to overwrite existing credentials.
|
||||
|
||||
Args:
|
||||
ctx: The MCP context for elicitation. If None, returns False immediately.
|
||||
action: The action name (for display in the prompt).
|
||||
description: Human-readable description of what the action will do.
|
||||
current_url: The currently configured URL and status (displayed for context).
|
||||
|
||||
Returns:
|
||||
True if the user accepted, False if declined, cancelled, or no context.
|
||||
True if the user confirmed the reset, False otherwise.
|
||||
"""
|
||||
if ctx is None:
|
||||
logger.warning(
|
||||
"Cannot elicit confirmation for '%s': no MCP context available. "
|
||||
"Re-run with confirm=True to bypass elicitation.",
|
||||
action,
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
result = await ctx.elicit(
|
||||
message=(
|
||||
f"**Confirm destructive action: `{action}`**\n\n"
|
||||
f"{description}\n\n"
|
||||
"Are you sure you want to proceed?"
|
||||
"Credentials are already configured.\n\n"
|
||||
f"**Current URL:** `{current_url}`\n\n"
|
||||
"Do you want to reset your API URL and key?"
|
||||
),
|
||||
response_type=bool,
|
||||
)
|
||||
except NotImplementedError:
|
||||
# Client doesn't support elicitation — treat as "proceed with reset" so
|
||||
# non-interactive clients (stdio, CI) are not permanently blocked from
|
||||
# reconfiguring credentials.
|
||||
logger.warning(
|
||||
"MCP client does not support elicitation for action '%s'. "
|
||||
"Re-run with confirm=True to bypass.",
|
||||
action,
|
||||
"MCP client does not support elicitation for reset confirmation — proceeding with reset."
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
if result.action != "accept":
|
||||
logger.info("Destructive action '%s' declined by user (%s).", action, result.action)
|
||||
logger.info("Credential reset declined by user (%s).", result.action)
|
||||
return False
|
||||
|
||||
confirmed: bool = result.data # type: ignore[union-attr]
|
||||
if not confirmed:
|
||||
logger.info("Destructive action '%s' not confirmed by user.", action)
|
||||
return confirmed
|
||||
|
||||
|
||||
@@ -94,7 +85,7 @@ async def elicit_and_configure(ctx: Context | None) -> bool:
|
||||
if ctx is None:
|
||||
logger.warning(
|
||||
"Cannot elicit credentials: no MCP context available. "
|
||||
"Run unraid_health action=setup to configure credentials."
|
||||
"Run unraid(action=health, subaction=setup) to configure credentials."
|
||||
)
|
||||
return False
|
||||
|
||||
@@ -111,7 +102,7 @@ async def elicit_and_configure(ctx: Context | None) -> bool:
|
||||
except NotImplementedError:
|
||||
logger.warning(
|
||||
"MCP client does not support elicitation. "
|
||||
"Use unraid_health action=setup or create %s manually.",
|
||||
"Use unraid(action=health, subaction=setup) or create %s manually.",
|
||||
CREDENTIALS_ENV_PATH,
|
||||
)
|
||||
return False
|
||||
|
||||
@@ -22,8 +22,6 @@ class SubscriptionData:
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if self.last_updated.tzinfo is None:
|
||||
raise ValueError(
|
||||
"last_updated must be timezone-aware; use datetime.now(UTC)"
|
||||
)
|
||||
raise ValueError("last_updated must be timezone-aware; use datetime.now(UTC)")
|
||||
if not self.subscription_type.strip():
|
||||
raise ValueError("subscription_type must be a non-empty string")
|
||||
|
||||
@@ -4,12 +4,22 @@ This is the main server implementation using the modular architecture with
|
||||
separate modules for configuration, core functionality, subscriptions, and tools.
|
||||
"""
|
||||
|
||||
import hmac
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.server.auth import AccessToken, MultiAuth, TokenVerifier
|
||||
from fastmcp.server.auth.providers.google import GoogleProvider
|
||||
from fastmcp.server.middleware.caching import CallToolSettings, ResponseCachingMiddleware
|
||||
from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
|
||||
from fastmcp.server.middleware.logging import LoggingMiddleware
|
||||
from fastmcp.server.middleware.rate_limiting import SlidingWindowRateLimitingMiddleware
|
||||
from fastmcp.server.middleware.response_limiting import ResponseLimitingMiddleware
|
||||
|
||||
from .config.logging import logger
|
||||
from .config.settings import (
|
||||
LOG_LEVEL_STR,
|
||||
UNRAID_MCP_HOST,
|
||||
UNRAID_MCP_PORT,
|
||||
UNRAID_MCP_TRANSPORT,
|
||||
@@ -19,33 +29,184 @@ from .config.settings import (
|
||||
)
|
||||
from .subscriptions.diagnostics import register_diagnostic_tools
|
||||
from .subscriptions.resources import register_subscription_resources
|
||||
from .tools.array import register_array_tool
|
||||
from .tools.customization import register_customization_tool
|
||||
from .tools.docker import register_docker_tool
|
||||
from .tools.health import register_health_tool
|
||||
from .tools.info import register_info_tool
|
||||
from .tools.keys import register_keys_tool
|
||||
from .tools.live import register_live_tool
|
||||
from .tools.notifications import register_notifications_tool
|
||||
from .tools.oidc import register_oidc_tool
|
||||
from .tools.plugins import register_plugins_tool
|
||||
from .tools.rclone import register_rclone_tool
|
||||
from .tools.settings import register_settings_tool
|
||||
from .tools.storage import register_storage_tool
|
||||
from .tools.users import register_users_tool
|
||||
from .tools.virtualization import register_vm_tool
|
||||
from .tools.unraid import register_unraid_tool
|
||||
|
||||
|
||||
# Middleware chain order matters — each layer wraps everything inside it:
|
||||
# logging → error_handling → rate_limiter → response_limiter → cache → tool
|
||||
|
||||
# 1. Log every tools/call and resources/read: method, duration, errors.
|
||||
# Outermost so it captures errors after they've been converted by error_handling.
|
||||
_logging_middleware = LoggingMiddleware(
|
||||
logger=logger,
|
||||
methods=["tools/call", "resources/read"],
|
||||
)
|
||||
|
||||
# 2. Catch any unhandled exceptions and convert to proper MCP errors.
|
||||
# Tracks error_counts per (exception_type:method) for health diagnose.
|
||||
_error_middleware = ErrorHandlingMiddleware(
|
||||
logger=logger,
|
||||
include_traceback=LOG_LEVEL_STR == "DEBUG",
|
||||
)
|
||||
|
||||
# 3. Unraid API rate limit: 100 requests per 10 seconds.
|
||||
# SlidingWindowRateLimitingMiddleware only accepts window_minutes (int), so express
|
||||
# the 10-second budget as a 1-minute equivalent: 540 req/60 s to stay comfortably
|
||||
# under the 600 req/min ceiling.
|
||||
_rate_limiter = SlidingWindowRateLimitingMiddleware(max_requests=540, window_minutes=1)
|
||||
|
||||
# 4. Cap tool responses at 512 KB to protect the client context window.
|
||||
# Oversized responses are truncated with a clear suffix rather than erroring.
|
||||
_response_limiter = ResponseLimitingMiddleware(max_size=512_000)
|
||||
|
||||
# 5. Cache middleware — all call_tool caching is disabled for the `unraid` tool.
|
||||
# CallToolSettings supports excluded_tools/included_tools by tool name only; there
|
||||
# is no per-argument or per-subaction exclusion mechanism. The cache key is
|
||||
# "{tool_name}:{arguments_str}", so a cached stop("nginx") result would be served
|
||||
# back on a retry within the TTL window even though the container is already stopped.
|
||||
# Mutation subactions (start, stop, restart, reboot, etc.) must never be cached.
|
||||
# Because the consolidated `unraid` tool mixes reads and mutations under one name,
|
||||
# the only safe option is to disable caching for the entire tool.
|
||||
_cache_middleware = ResponseCachingMiddleware(
|
||||
call_tool_settings=CallToolSettings(
|
||||
enabled=False,
|
||||
),
|
||||
# Disable caching for list/resource/prompt — those are cheap.
|
||||
list_tools_settings={"enabled": False},
|
||||
list_resources_settings={"enabled": False},
|
||||
list_prompts_settings={"enabled": False},
|
||||
read_resource_settings={"enabled": False},
|
||||
get_prompt_settings={"enabled": False},
|
||||
)
|
||||
|
||||
|
||||
class ApiKeyVerifier(TokenVerifier):
|
||||
"""Bearer token verifier that validates against a static API key.
|
||||
|
||||
Clients present the key as a standard OAuth bearer token:
|
||||
Authorization: Bearer <UNRAID_MCP_API_KEY>
|
||||
|
||||
This allows machine-to-machine access (e.g. CI, scripts, other agents)
|
||||
without going through the Google OAuth browser flow.
|
||||
"""
|
||||
|
||||
def __init__(self, api_key: str) -> None:
|
||||
super().__init__()
|
||||
self._api_key = api_key
|
||||
|
||||
async def verify_token(self, token: str) -> AccessToken | None:
|
||||
if self._api_key and hmac.compare_digest(token.encode(), self._api_key.encode()):
|
||||
return AccessToken(
|
||||
token=token,
|
||||
client_id="api-key-client",
|
||||
scopes=[],
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
def _build_google_auth() -> "GoogleProvider | None":
|
||||
"""Build GoogleProvider when OAuth env vars are configured, else return None.
|
||||
|
||||
Returns None (no auth) when GOOGLE_CLIENT_ID or GOOGLE_CLIENT_SECRET are absent,
|
||||
preserving backward compatibility for existing unprotected setups.
|
||||
"""
|
||||
from .config.settings import (
|
||||
GOOGLE_CLIENT_ID,
|
||||
GOOGLE_CLIENT_SECRET,
|
||||
UNRAID_MCP_BASE_URL,
|
||||
UNRAID_MCP_JWT_SIGNING_KEY,
|
||||
UNRAID_MCP_TRANSPORT,
|
||||
is_google_auth_configured,
|
||||
)
|
||||
|
||||
if not is_google_auth_configured():
|
||||
return None
|
||||
|
||||
if UNRAID_MCP_TRANSPORT == "stdio":
|
||||
logger.warning(
|
||||
"Google OAuth is configured but UNRAID_MCP_TRANSPORT=stdio. "
|
||||
"OAuth requires HTTP transport (streamable-http or sse). "
|
||||
"Auth will be applied but may not work as expected."
|
||||
)
|
||||
|
||||
kwargs: dict[str, Any] = {
|
||||
"client_id": GOOGLE_CLIENT_ID,
|
||||
"client_secret": GOOGLE_CLIENT_SECRET,
|
||||
"base_url": UNRAID_MCP_BASE_URL,
|
||||
# Prefer short-lived access tokens without refresh-token rotation churn.
|
||||
# This reduces reconnect instability in MCP clients that re-auth frequently.
|
||||
"extra_authorize_params": {"access_type": "online", "prompt": "consent"},
|
||||
# Skip the FastMCP consent page — goes directly to Google.
|
||||
# The consent page has a CSRF double-load race: two concurrent GET requests
|
||||
# each regenerate the CSRF token, the second overwrites the first in the
|
||||
# transaction store, and the POST fails with "Invalid or expired consent token".
|
||||
"require_authorization_consent": False,
|
||||
}
|
||||
if UNRAID_MCP_JWT_SIGNING_KEY:
|
||||
kwargs["jwt_signing_key"] = UNRAID_MCP_JWT_SIGNING_KEY
|
||||
else:
|
||||
logger.warning(
|
||||
"UNRAID_MCP_JWT_SIGNING_KEY is not set. FastMCP will derive a key automatically, "
|
||||
"but tokens may be invalidated on server restart. "
|
||||
"Set UNRAID_MCP_JWT_SIGNING_KEY to a stable secret."
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Google OAuth enabled — base_url={UNRAID_MCP_BASE_URL}, "
|
||||
f"redirect_uri={UNRAID_MCP_BASE_URL}/auth/callback"
|
||||
)
|
||||
return GoogleProvider(**kwargs)
|
||||
|
||||
|
||||
def _build_auth() -> "GoogleProvider | ApiKeyVerifier | MultiAuth | None":
|
||||
"""Build the active auth stack from environment configuration.
|
||||
|
||||
Returns:
|
||||
- MultiAuth(server=GoogleProvider, verifiers=[ApiKeyVerifier])
|
||||
when both GOOGLE_CLIENT_ID and UNRAID_MCP_API_KEY are set.
|
||||
- GoogleProvider alone when only Google OAuth vars are set.
|
||||
- ApiKeyVerifier alone when only UNRAID_MCP_API_KEY is set.
|
||||
- None when no auth vars are configured (open server).
|
||||
"""
|
||||
from .config.settings import UNRAID_MCP_API_KEY, is_api_key_auth_configured
|
||||
|
||||
google = _build_google_auth()
|
||||
api_key = ApiKeyVerifier(UNRAID_MCP_API_KEY) if is_api_key_auth_configured() else None
|
||||
|
||||
if google and api_key:
|
||||
logger.info("Auth: Google OAuth + API key both enabled (MultiAuth)")
|
||||
return MultiAuth(server=google, verifiers=[api_key])
|
||||
if api_key:
|
||||
logger.info("Auth: API key authentication enabled")
|
||||
return api_key
|
||||
return google # GoogleProvider or None
|
||||
|
||||
|
||||
# Build auth stack — GoogleProvider, ApiKeyVerifier, MultiAuth, or None.
|
||||
_auth = _build_auth()
|
||||
|
||||
# Initialize FastMCP instance
|
||||
mcp = FastMCP(
|
||||
name="Unraid MCP Server",
|
||||
instructions="Provides tools to interact with an Unraid server's GraphQL API.",
|
||||
version=VERSION,
|
||||
auth=_auth,
|
||||
middleware=[
|
||||
_logging_middleware,
|
||||
_error_middleware,
|
||||
_rate_limiter,
|
||||
_response_limiter,
|
||||
_cache_middleware,
|
||||
],
|
||||
)
|
||||
|
||||
# Note: SubscriptionManager singleton is defined in subscriptions/manager.py
|
||||
# and imported by resources.py - no duplicate instance needed here
|
||||
|
||||
# Register all modules at import time so `fastmcp run server.py --reload` can
|
||||
# discover the fully-configured `mcp` object without going through run_server().
|
||||
# run_server() no longer calls this — tools are registered exactly once here.
|
||||
|
||||
|
||||
def register_all_modules() -> None:
|
||||
"""Register all tools and resources with the MCP instance."""
|
||||
@@ -55,34 +216,18 @@ def register_all_modules() -> None:
|
||||
register_diagnostic_tools(mcp)
|
||||
logger.info("Subscription resources and diagnostic tools registered")
|
||||
|
||||
# Register all consolidated tools
|
||||
registrars = [
|
||||
register_info_tool,
|
||||
register_array_tool,
|
||||
register_storage_tool,
|
||||
register_docker_tool,
|
||||
register_vm_tool,
|
||||
register_notifications_tool,
|
||||
register_plugins_tool,
|
||||
register_rclone_tool,
|
||||
register_users_tool,
|
||||
register_keys_tool,
|
||||
register_health_tool,
|
||||
register_settings_tool,
|
||||
register_live_tool,
|
||||
register_customization_tool,
|
||||
register_oidc_tool,
|
||||
]
|
||||
for registrar in registrars:
|
||||
registrar(mcp)
|
||||
|
||||
logger.info(f"All {len(registrars)} tools registered successfully - Server ready!")
|
||||
# Register the consolidated unraid tool
|
||||
register_unraid_tool(mcp)
|
||||
logger.info("unraid tool registered successfully - Server ready!")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to register modules: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
register_all_modules()
|
||||
|
||||
|
||||
def run_server() -> None:
|
||||
"""Run the MCP server with the configured transport."""
|
||||
# Validate required configuration before anything else
|
||||
@@ -105,8 +250,26 @@ def run_server() -> None:
|
||||
"Only use this in trusted networks or for development."
|
||||
)
|
||||
|
||||
# Register all modules
|
||||
register_all_modules()
|
||||
if _auth is not None:
|
||||
from .config.settings import is_google_auth_configured
|
||||
|
||||
if is_google_auth_configured():
|
||||
from .config.settings import UNRAID_MCP_BASE_URL
|
||||
|
||||
logger.info(
|
||||
"Google OAuth ENABLED — clients must authenticate before calling tools. "
|
||||
f"Redirect URI: {UNRAID_MCP_BASE_URL}/auth/callback"
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"API key authentication ENABLED — present UNRAID_MCP_API_KEY as bearer token."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
"No authentication configured — MCP server is open to all clients on the network. "
|
||||
"Set GOOGLE_CLIENT_ID + GOOGLE_CLIENT_SECRET + UNRAID_MCP_BASE_URL to enable Google OAuth, "
|
||||
"or set UNRAID_MCP_API_KEY to enable bearer token authentication."
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Starting Unraid MCP Server on {UNRAID_MCP_HOST}:{UNRAID_MCP_PORT} using {UNRAID_MCP_TRANSPORT} transport..."
|
||||
|
||||
@@ -15,13 +15,18 @@ import websockets
|
||||
from fastmcp import FastMCP
|
||||
from websockets.typing import Subprotocol
|
||||
|
||||
from ..config import settings as _settings
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL
|
||||
from ..core.exceptions import ToolError
|
||||
from ..core.utils import safe_display_url
|
||||
from .manager import subscription_manager
|
||||
from .resources import ensure_subscriptions_started
|
||||
from .utils import _analyze_subscription_status, build_ws_ssl_context, build_ws_url
|
||||
from .utils import (
|
||||
_analyze_subscription_status,
|
||||
build_connection_init,
|
||||
build_ws_ssl_context,
|
||||
build_ws_url,
|
||||
)
|
||||
|
||||
|
||||
# Schema field names that appear inside the selection set of allowed subscriptions.
|
||||
@@ -125,15 +130,8 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
ping_interval=30,
|
||||
ping_timeout=10,
|
||||
) as websocket:
|
||||
# Send connection init (using standard X-API-Key format)
|
||||
await websocket.send(
|
||||
json.dumps(
|
||||
{
|
||||
"type": "connection_init",
|
||||
"payload": {"x-api-key": UNRAID_API_KEY},
|
||||
}
|
||||
)
|
||||
)
|
||||
# Send connection init
|
||||
await websocket.send(json.dumps(build_connection_init()))
|
||||
|
||||
# Wait for ack
|
||||
response = await websocket.recv()
|
||||
@@ -203,7 +201,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
|
||||
# Calculate WebSocket URL
|
||||
ws_url_display: str | None = None
|
||||
if UNRAID_API_URL:
|
||||
if _settings.UNRAID_API_URL:
|
||||
try:
|
||||
ws_url_display = build_ws_url()
|
||||
except ValueError:
|
||||
@@ -215,8 +213,8 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
"environment": {
|
||||
"auto_start_enabled": subscription_manager.auto_start_enabled,
|
||||
"max_reconnect_attempts": subscription_manager.max_reconnect_attempts,
|
||||
"unraid_api_url": safe_display_url(UNRAID_API_URL),
|
||||
"api_key_configured": bool(UNRAID_API_KEY),
|
||||
"unraid_api_url": safe_display_url(_settings.UNRAID_API_URL),
|
||||
"api_key_configured": bool(_settings.UNRAID_API_KEY),
|
||||
"websocket_url": ws_url_display,
|
||||
},
|
||||
"subscriptions": status,
|
||||
|
||||
@@ -15,11 +15,11 @@ from typing import Any
|
||||
import websockets
|
||||
from websockets.typing import Subprotocol
|
||||
|
||||
from ..config import settings as _settings
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY
|
||||
from ..core.client import redact_sensitive
|
||||
from ..core.types import SubscriptionData
|
||||
from .utils import build_ws_ssl_context, build_ws_url
|
||||
from .utils import build_connection_init, build_ws_ssl_context, build_ws_url
|
||||
|
||||
|
||||
# Resource data size limits to prevent unbounded memory growth
|
||||
@@ -100,8 +100,18 @@ class SubscriptionManager:
|
||||
self._connection_start_times: dict[str, float] = {} # Track when connections started
|
||||
|
||||
# Define subscription configurations
|
||||
self.subscription_configs = {
|
||||
"logFileSubscription": {
|
||||
from .queries import SNAPSHOT_ACTIONS
|
||||
|
||||
self.subscription_configs: dict[str, dict] = {
|
||||
action: {
|
||||
"query": query,
|
||||
"resource": f"unraid://live/{action}",
|
||||
"description": f"Real-time {action.replace('_', ' ')} data",
|
||||
"auto_start": True,
|
||||
}
|
||||
for action, query in SNAPSHOT_ACTIONS.items()
|
||||
}
|
||||
self.subscription_configs["logFileSubscription"] = {
|
||||
"query": """
|
||||
subscription LogFileSubscription($path: String!) {
|
||||
logFile(path: $path) {
|
||||
@@ -115,7 +125,6 @@ class SubscriptionManager:
|
||||
"description": "Real-time log file streaming",
|
||||
"auto_start": False, # Started manually with path parameter
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
f"[SUBSCRIPTION_MANAGER] Initialized with auto_start={self.auto_start_enabled}, max_reconnects={self.max_reconnect_attempts}"
|
||||
@@ -241,7 +250,7 @@ class SubscriptionManager:
|
||||
ws_url = build_ws_url()
|
||||
logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}")
|
||||
logger.debug(
|
||||
f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}"
|
||||
f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if _settings.UNRAID_API_KEY else 'No'}"
|
||||
)
|
||||
|
||||
ssl_context = build_ws_ssl_context(ws_url)
|
||||
@@ -275,13 +284,9 @@ class SubscriptionManager:
|
||||
logger.debug(
|
||||
f"[PROTOCOL:{subscription_name}] Initializing GraphQL-WS protocol..."
|
||||
)
|
||||
init_type = "connection_init"
|
||||
init_payload: dict[str, Any] = {"type": init_type}
|
||||
|
||||
if UNRAID_API_KEY:
|
||||
init_payload = build_connection_init()
|
||||
if "payload" in init_payload:
|
||||
logger.debug(f"[AUTH:{subscription_name}] Adding authentication payload")
|
||||
# Use graphql-ws connectionParams format (direct key, not nested headers)
|
||||
init_payload["payload"] = {"x-api-key": UNRAID_API_KEY}
|
||||
else:
|
||||
logger.warning(
|
||||
f"[AUTH:{subscription_name}] No API key available for authentication"
|
||||
|
||||
@@ -1,5 +1,17 @@
|
||||
"""GraphQL subscription query strings for snapshot and collect operations."""
|
||||
|
||||
# Subscriptions that only emit on state changes (not on a regular interval).
|
||||
# When subscribe_once times out for these, it means no recent change — not an error.
|
||||
EVENT_DRIVEN_ACTIONS: frozenset[str] = frozenset(
|
||||
{
|
||||
"parity_progress",
|
||||
"ups_status",
|
||||
"notifications_overview",
|
||||
"owner",
|
||||
"server_status",
|
||||
}
|
||||
)
|
||||
|
||||
SNAPSHOT_ACTIONS = {
|
||||
"cpu": """
|
||||
subscription { systemMetricsCpu { id percentTotal cpus { percentTotal percentUser percentSystem percentIdle } } }
|
||||
|
||||
@@ -95,7 +95,7 @@ def register_subscription_resources(mcp: FastMCP) -> None:
|
||||
"""Real-time log stream data from subscription."""
|
||||
await ensure_subscriptions_started()
|
||||
data = await subscription_manager.get_resource_data("logFileSubscription")
|
||||
if data:
|
||||
if data is not None:
|
||||
return json.dumps(data, indent=2)
|
||||
return json.dumps(
|
||||
{
|
||||
@@ -104,14 +104,37 @@ def register_subscription_resources(mcp: FastMCP) -> None:
|
||||
}
|
||||
)
|
||||
|
||||
def _make_resource_fn(action: str, query: str):
|
||||
def _make_resource_fn(action: str):
|
||||
async def _live_resource() -> str:
|
||||
await ensure_subscriptions_started()
|
||||
try:
|
||||
data = await subscribe_once(query)
|
||||
data = await subscription_manager.get_resource_data(action)
|
||||
if data is not None:
|
||||
return json.dumps(data, indent=2)
|
||||
except Exception as exc:
|
||||
return json.dumps({"error": str(exc), "action": action})
|
||||
# Surface permanent errors instead of reporting "connecting" indefinitely
|
||||
last_error = subscription_manager.last_error.get(action)
|
||||
if last_error:
|
||||
return json.dumps(
|
||||
{
|
||||
"status": "error",
|
||||
"message": f"Subscription '{action}' failed: {last_error}",
|
||||
}
|
||||
)
|
||||
# When auto-start is disabled, fall back to a one-shot fetch so the
|
||||
# resource returns real data instead of a perpetual "connecting" placeholder.
|
||||
if not subscription_manager.auto_start_enabled:
|
||||
try:
|
||||
query_info = SNAPSHOT_ACTIONS.get(action)
|
||||
if query_info is not None:
|
||||
fallback_data = await subscribe_once(query_info)
|
||||
return json.dumps(fallback_data, indent=2)
|
||||
except Exception as e:
|
||||
logger.warning("[RESOURCE] On-demand fallback for '%s' failed: %s", action, e)
|
||||
return json.dumps(
|
||||
{
|
||||
"status": "connecting",
|
||||
"message": f"Subscription '{action}' is starting. Retry in a moment.",
|
||||
}
|
||||
)
|
||||
|
||||
_live_resource.__name__ = f"{action}_resource"
|
||||
_live_resource.__doc__ = (
|
||||
@@ -119,7 +142,7 @@ def register_subscription_resources(mcp: FastMCP) -> None:
|
||||
)
|
||||
return _live_resource
|
||||
|
||||
for _action, _query in SNAPSHOT_ACTIONS.items():
|
||||
mcp.resource(f"unraid://live/{_action}")(_make_resource_fn(_action, _query))
|
||||
for _action in SNAPSHOT_ACTIONS:
|
||||
mcp.resource(f"unraid://live/{_action}")(_make_resource_fn(_action))
|
||||
|
||||
logger.info("Subscription resources registered successfully")
|
||||
|
||||
@@ -11,8 +11,6 @@ WebSocket per call. This is intentional: MCP tools are request-response.
|
||||
Use the SubscriptionManager for long-lived monitoring resources.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Any
|
||||
@@ -21,9 +19,8 @@ import websockets
|
||||
from websockets.typing import Subprotocol
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY
|
||||
from ..core.exceptions import ToolError
|
||||
from .utils import build_ws_ssl_context, build_ws_url
|
||||
from .utils import build_connection_init, build_ws_ssl_context, build_ws_url
|
||||
|
||||
|
||||
async def subscribe_once(
|
||||
@@ -50,10 +47,7 @@ async def subscribe_once(
|
||||
sub_id = "snapshot-1"
|
||||
|
||||
# Handshake
|
||||
init: dict[str, Any] = {"type": "connection_init"}
|
||||
if UNRAID_API_KEY:
|
||||
init["payload"] = {"x-api-key": UNRAID_API_KEY}
|
||||
await ws.send(json.dumps(init))
|
||||
await ws.send(json.dumps(build_connection_init()))
|
||||
|
||||
raw = await asyncio.wait_for(ws.recv(), timeout=timeout)
|
||||
ack = json.loads(raw)
|
||||
@@ -125,10 +119,7 @@ async def subscribe_collect(
|
||||
proto = ws.subprotocol or "graphql-transport-ws"
|
||||
sub_id = "snapshot-1"
|
||||
|
||||
init: dict[str, Any] = {"type": "connection_init"}
|
||||
if UNRAID_API_KEY:
|
||||
init["payload"] = {"x-api-key": UNRAID_API_KEY}
|
||||
await ws.send(json.dumps(init))
|
||||
await ws.send(json.dumps(build_connection_init()))
|
||||
|
||||
raw = await asyncio.wait_for(ws.recv(), timeout=timeout)
|
||||
ack = json.loads(raw)
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
import ssl as _ssl
|
||||
from typing import Any
|
||||
|
||||
from ..config.settings import UNRAID_API_URL, UNRAID_VERIFY_SSL
|
||||
from ..config import settings as _settings
|
||||
|
||||
|
||||
def build_ws_url() -> str:
|
||||
"""Build a WebSocket URL from the configured UNRAID_API_URL.
|
||||
"""Build a WebSocket URL from the configured UNRAID_API_URL setting.
|
||||
|
||||
Converts http(s) scheme to ws(s) and ensures /graphql path suffix.
|
||||
|
||||
@@ -17,19 +17,19 @@ def build_ws_url() -> str:
|
||||
Raises:
|
||||
ValueError: If UNRAID_API_URL is not configured or has an unrecognised scheme.
|
||||
"""
|
||||
if not UNRAID_API_URL:
|
||||
if not _settings.UNRAID_API_URL:
|
||||
raise ValueError("UNRAID_API_URL is not configured")
|
||||
|
||||
if UNRAID_API_URL.startswith("https://"):
|
||||
ws_url = "wss://" + UNRAID_API_URL[len("https://") :]
|
||||
elif UNRAID_API_URL.startswith("http://"):
|
||||
ws_url = "ws://" + UNRAID_API_URL[len("http://") :]
|
||||
elif UNRAID_API_URL.startswith(("ws://", "wss://")):
|
||||
ws_url = UNRAID_API_URL # Already a WebSocket URL
|
||||
if _settings.UNRAID_API_URL.startswith("https://"):
|
||||
ws_url = "wss://" + _settings.UNRAID_API_URL[len("https://") :]
|
||||
elif _settings.UNRAID_API_URL.startswith("http://"):
|
||||
ws_url = "ws://" + _settings.UNRAID_API_URL[len("http://") :]
|
||||
elif _settings.UNRAID_API_URL.startswith(("ws://", "wss://")):
|
||||
ws_url = _settings.UNRAID_API_URL # Already a WebSocket URL
|
||||
else:
|
||||
raise ValueError(
|
||||
f"UNRAID_API_URL must start with http://, https://, ws://, or wss://. "
|
||||
f"Got: {UNRAID_API_URL[:20]}..."
|
||||
f"Got: {_settings.UNRAID_API_URL[:20]}..."
|
||||
)
|
||||
|
||||
if not ws_url.endswith("/graphql"):
|
||||
@@ -49,9 +49,9 @@ def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None:
|
||||
"""
|
||||
if not ws_url.startswith("wss://"):
|
||||
return None
|
||||
if isinstance(UNRAID_VERIFY_SSL, str):
|
||||
return _ssl.create_default_context(cafile=UNRAID_VERIFY_SSL)
|
||||
if UNRAID_VERIFY_SSL:
|
||||
if isinstance(_settings.UNRAID_VERIFY_SSL, str):
|
||||
return _ssl.create_default_context(cafile=_settings.UNRAID_VERIFY_SSL)
|
||||
if _settings.UNRAID_VERIFY_SSL:
|
||||
return _ssl.create_default_context()
|
||||
# Explicitly disable verification (equivalent to verify=False)
|
||||
ctx = _ssl.SSLContext(_ssl.PROTOCOL_TLS_CLIENT)
|
||||
@@ -60,6 +60,18 @@ def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None:
|
||||
return ctx
|
||||
|
||||
|
||||
def build_connection_init() -> dict[str, Any]:
|
||||
"""Build the graphql-ws connection_init message.
|
||||
|
||||
Omits the payload key entirely when no API key is configured —
|
||||
sending {"x-api-key": None} and omitting the key differ for some servers.
|
||||
"""
|
||||
msg: dict[str, Any] = {"type": "connection_init"}
|
||||
if _settings.UNRAID_API_KEY:
|
||||
msg["payload"] = {"x-api-key": _settings.UNRAID_API_KEY}
|
||||
return msg
|
||||
|
||||
|
||||
def _analyze_subscription_status(
|
||||
status: dict[str, Any],
|
||||
) -> tuple[int, list[dict[str, Any]]]:
|
||||
|
||||
@@ -1,14 +1,19 @@
|
||||
"""MCP tools organized by functional domain.
|
||||
"""MCP tools — single consolidated unraid tool with action + subaction routing.
|
||||
|
||||
10 consolidated tools with 76 actions total:
|
||||
unraid_info - System information queries (19 actions)
|
||||
unraid_array - Array operations and parity management (5 actions)
|
||||
unraid_storage - Storage, disks, and logs (6 actions)
|
||||
unraid_docker - Docker container management (15 actions)
|
||||
unraid_vm - Virtual machine management (9 actions)
|
||||
unraid_notifications - Notification management (9 actions)
|
||||
unraid_rclone - Cloud storage remotes (4 actions)
|
||||
unraid_users - User management (1 action)
|
||||
unraid_keys - API key management (5 actions)
|
||||
unraid_health - Health monitoring and diagnostics (3 actions)
|
||||
unraid - All Unraid operations (15 actions, ~107 subactions)
|
||||
system - System info, metrics, UPS, network, registration
|
||||
health - Health checks, connection test, diagnostics, setup
|
||||
array - Parity, array state, disk add/remove/mount
|
||||
disk - Shares, physical disks, logs, flash backup
|
||||
docker - Container list/details/start/stop/restart, networks
|
||||
vm - VM list/details and lifecycle (start/stop/pause/resume/etc)
|
||||
notification - Notification CRUD and bulk operations
|
||||
key - API key management
|
||||
plugin - Plugin list/add/remove
|
||||
rclone - Cloud remote management
|
||||
setting - System settings and UPS config
|
||||
customization - Theme and UI customization
|
||||
oidc - OIDC/SSO provider management
|
||||
user - Current user info
|
||||
live - Real-time subscription snapshots
|
||||
"""
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
"""Array management: parity checks, array state, and disk operations.
|
||||
|
||||
Provides the `unraid_array` tool with 13 actions covering parity check
|
||||
management, array start/stop, and disk add/remove/mount operations.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import Context, FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.setup import elicit_destructive_confirmation
|
||||
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"parity_status": """
|
||||
query GetParityStatus {
|
||||
array { parityCheckStatus { progress speed errors status paused running correcting } }
|
||||
}
|
||||
""",
|
||||
"parity_history": """
|
||||
query GetParityHistory {
|
||||
parityHistory {
|
||||
date duration speed status errors progress correcting paused running
|
||||
}
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"parity_start": """
|
||||
mutation StartParityCheck($correct: Boolean!) {
|
||||
parityCheck { start(correct: $correct) }
|
||||
}
|
||||
""",
|
||||
"parity_pause": """
|
||||
mutation PauseParityCheck {
|
||||
parityCheck { pause }
|
||||
}
|
||||
""",
|
||||
"parity_resume": """
|
||||
mutation ResumeParityCheck {
|
||||
parityCheck { resume }
|
||||
}
|
||||
""",
|
||||
"parity_cancel": """
|
||||
mutation CancelParityCheck {
|
||||
parityCheck { cancel }
|
||||
}
|
||||
""",
|
||||
"start_array": """
|
||||
mutation StartArray {
|
||||
array { setState(input: { desiredState: START }) {
|
||||
state capacity { kilobytes { free used total } }
|
||||
}}
|
||||
}
|
||||
""",
|
||||
"stop_array": """
|
||||
mutation StopArray {
|
||||
array { setState(input: { desiredState: STOP }) {
|
||||
state
|
||||
}}
|
||||
}
|
||||
""",
|
||||
"add_disk": """
|
||||
mutation AddDisk($id: PrefixedID!, $slot: Int) {
|
||||
array { addDiskToArray(input: { id: $id, slot: $slot }) {
|
||||
state disks { id name device type status }
|
||||
}}
|
||||
}
|
||||
""",
|
||||
"remove_disk": """
|
||||
mutation RemoveDisk($id: PrefixedID!) {
|
||||
array { removeDiskFromArray(input: { id: $id }) {
|
||||
state disks { id name device type }
|
||||
}}
|
||||
}
|
||||
""",
|
||||
"mount_disk": """
|
||||
mutation MountDisk($id: PrefixedID!) {
|
||||
array { mountArrayDisk(id: $id) { id name device status } }
|
||||
}
|
||||
""",
|
||||
"unmount_disk": """
|
||||
mutation UnmountDisk($id: PrefixedID!) {
|
||||
array { unmountArrayDisk(id: $id) { id name device status } }
|
||||
}
|
||||
""",
|
||||
"clear_disk_stats": """
|
||||
mutation ClearDiskStats($id: PrefixedID!) {
|
||||
array { clearArrayDiskStatistics(id: $id) }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
DESTRUCTIVE_ACTIONS = {"remove_disk", "clear_disk_stats", "stop_array"}
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
|
||||
ARRAY_ACTIONS = Literal[
|
||||
"add_disk",
|
||||
"clear_disk_stats",
|
||||
"mount_disk",
|
||||
"parity_cancel",
|
||||
"parity_history",
|
||||
"parity_pause",
|
||||
"parity_resume",
|
||||
"parity_start",
|
||||
"parity_status",
|
||||
"remove_disk",
|
||||
"start_array",
|
||||
"stop_array",
|
||||
"unmount_disk",
|
||||
]
|
||||
|
||||
if set(get_args(ARRAY_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(ARRAY_ACTIONS))
|
||||
_extra = set(get_args(ARRAY_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"ARRAY_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_array_tool(mcp: FastMCP) -> None:
|
||||
"""Register the unraid_array tool with the FastMCP instance."""
|
||||
|
||||
@mcp.tool()
|
||||
async def unraid_array(
|
||||
action: ARRAY_ACTIONS,
|
||||
ctx: Context | None = None,
|
||||
confirm: bool = False,
|
||||
correct: bool | None = None,
|
||||
disk_id: str | None = None,
|
||||
slot: int | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Manage Unraid array: parity checks, array state, and disk operations.
|
||||
|
||||
Parity check actions:
|
||||
parity_start - Start parity check (correct=True to write fixes; required)
|
||||
parity_pause - Pause running parity check
|
||||
parity_resume - Resume paused parity check
|
||||
parity_cancel - Cancel running parity check
|
||||
parity_status - Get current parity check status and progress
|
||||
parity_history - Get parity check history log
|
||||
|
||||
Array state actions:
|
||||
start_array - Start the array (desiredState=START)
|
||||
stop_array - Stop the array (desiredState=STOP)
|
||||
|
||||
Disk operations (requires disk_id):
|
||||
add_disk - Add a disk to the array (requires disk_id; optional slot)
|
||||
remove_disk - Remove a disk from the array (requires disk_id, confirm=True; array must be stopped)
|
||||
mount_disk - Mount a disk (requires disk_id)
|
||||
unmount_disk - Unmount a disk (requires disk_id)
|
||||
clear_disk_stats - Clear I/O statistics for a disk (requires disk_id, confirm=True)
|
||||
"""
|
||||
if action not in ALL_ACTIONS:
|
||||
raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")
|
||||
|
||||
if action in DESTRUCTIVE_ACTIONS and not confirm:
|
||||
desc_map = {
|
||||
"remove_disk": f"Remove disk **{disk_id}** from the array. The array must be stopped first.",
|
||||
"clear_disk_stats": f"Clear all I/O statistics for disk **{disk_id}**. This cannot be undone.",
|
||||
"stop_array": "Stop the Unraid array. Running containers and VMs may lose access to array shares.",
|
||||
}
|
||||
confirmed = await elicit_destructive_confirmation(ctx, action, desc_map[action])
|
||||
if not confirmed:
|
||||
raise ToolError(
|
||||
f"Action '{action}' was not confirmed. "
|
||||
"Re-run with confirm=True to bypass elicitation."
|
||||
)
|
||||
|
||||
with tool_error_handler("array", action, logger):
|
||||
logger.info(f"Executing unraid_array action={action}")
|
||||
|
||||
# --- Queries ---
|
||||
if action in QUERIES:
|
||||
data = await make_graphql_request(QUERIES[action])
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
# --- Mutations ---
|
||||
if action == "parity_start":
|
||||
if correct is None:
|
||||
raise ToolError("correct is required for 'parity_start' action")
|
||||
data = await make_graphql_request(MUTATIONS[action], {"correct": correct})
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
if action in ("parity_pause", "parity_resume", "parity_cancel"):
|
||||
data = await make_graphql_request(MUTATIONS[action])
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
if action in ("start_array", "stop_array"):
|
||||
data = await make_graphql_request(MUTATIONS[action])
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
if action == "add_disk":
|
||||
if not disk_id:
|
||||
raise ToolError("disk_id is required for 'add_disk' action")
|
||||
variables: dict[str, Any] = {"id": disk_id}
|
||||
if slot is not None:
|
||||
variables["slot"] = slot
|
||||
data = await make_graphql_request(MUTATIONS[action], variables)
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
if action in ("remove_disk", "mount_disk", "unmount_disk", "clear_disk_stats"):
|
||||
if not disk_id:
|
||||
raise ToolError(f"disk_id is required for '{action}' action")
|
||||
data = await make_graphql_request(MUTATIONS[action], {"id": disk_id})
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
raise ToolError(f"Unhandled action '{action}' — this is a bug")
|
||||
|
||||
logger.info("Array tool registered successfully")
|
||||
@@ -1,119 +0,0 @@
|
||||
"""UI customization and system state queries.
|
||||
|
||||
Provides the `unraid_customization` tool with 5 actions covering
|
||||
theme/customization data, public UI config, initial setup state, and
|
||||
theme mutation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any, Literal, get_args
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"theme": """
|
||||
query GetCustomization {
|
||||
customization {
|
||||
theme { name showBannerImage showBannerGradient showHeaderDescription
|
||||
headerBackgroundColor headerPrimaryTextColor headerSecondaryTextColor }
|
||||
partnerInfo { partnerName hasPartnerLogo partnerUrl partnerLogoUrl }
|
||||
activationCode { code partnerName serverName sysModel comment header theme }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"public_theme": """
|
||||
query GetPublicTheme {
|
||||
publicTheme { name showBannerImage showBannerGradient showHeaderDescription
|
||||
headerBackgroundColor headerPrimaryTextColor headerSecondaryTextColor }
|
||||
publicPartnerInfo { partnerName hasPartnerLogo partnerUrl partnerLogoUrl }
|
||||
}
|
||||
""",
|
||||
"is_initial_setup": """
|
||||
query IsInitialSetup {
|
||||
isInitialSetup
|
||||
}
|
||||
""",
|
||||
"sso_enabled": """
|
||||
query IsSSOEnabled {
|
||||
isSSOEnabled
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"set_theme": """
|
||||
mutation SetTheme($theme: ThemeName!) {
|
||||
customization { setTheme(theme: $theme) {
|
||||
name showBannerImage showBannerGradient showHeaderDescription
|
||||
}}
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
|
||||
CUSTOMIZATION_ACTIONS = Literal[
|
||||
"is_initial_setup",
|
||||
"public_theme",
|
||||
"set_theme",
|
||||
"sso_enabled",
|
||||
"theme",
|
||||
]
|
||||
|
||||
if set(get_args(CUSTOMIZATION_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(CUSTOMIZATION_ACTIONS))
|
||||
_extra = set(get_args(CUSTOMIZATION_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"CUSTOMIZATION_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing: {_missing or 'none'}. Extra: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_customization_tool(mcp: FastMCP) -> None:
|
||||
"""Register the unraid_customization tool with the FastMCP instance."""
|
||||
|
||||
@mcp.tool()
|
||||
async def unraid_customization(
|
||||
action: CUSTOMIZATION_ACTIONS,
|
||||
theme_name: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Manage Unraid UI customization and system state.
|
||||
|
||||
Actions:
|
||||
theme - Get full customization (theme, partner info, activation code)
|
||||
public_theme - Get public theme and partner info (no auth required)
|
||||
is_initial_setup - Check if server is in initial setup mode
|
||||
sso_enabled - Check if SSO is enabled
|
||||
set_theme - Change the UI theme (requires theme_name: azure/black/gray/white)
|
||||
"""
|
||||
if action not in ALL_ACTIONS:
|
||||
raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")
|
||||
|
||||
if action == "set_theme" and not theme_name:
|
||||
raise ToolError(
|
||||
"theme_name is required for 'set_theme' action "
|
||||
"(valid values: azure, black, gray, white)"
|
||||
)
|
||||
|
||||
with tool_error_handler("customization", action, logger):
|
||||
logger.info(f"Executing unraid_customization action={action}")
|
||||
|
||||
if action in QUERIES:
|
||||
data = await make_graphql_request(QUERIES[action])
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
if action == "set_theme":
|
||||
data = await make_graphql_request(MUTATIONS[action], {"theme": theme_name})
|
||||
return {"success": True, "action": action, "data": data}
|
||||
|
||||
raise ToolError(f"Unhandled action '{action}' — this is a bug")
|
||||
|
||||
logger.info("Customization tool registered successfully")
|
||||
@@ -1,342 +0,0 @@
|
||||
"""Docker container management.
|
||||
|
||||
Provides the `unraid_docker` tool with 7 actions for container lifecycle
|
||||
and network inspection.
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.utils import safe_get
|
||||
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"list": """
|
||||
query ListDockerContainers {
|
||||
docker { containers(skipCache: false) {
|
||||
id names image state status autoStart
|
||||
} }
|
||||
}
|
||||
""",
|
||||
"details": """
|
||||
query GetContainerDetails {
|
||||
docker { containers(skipCache: false) {
|
||||
id names image imageId command created
|
||||
ports { ip privatePort publicPort type }
|
||||
sizeRootFs labels state status
|
||||
hostConfig { networkMode }
|
||||
networkSettings mounts autoStart
|
||||
} }
|
||||
}
|
||||
""",
|
||||
"networks": """
|
||||
query GetDockerNetworks {
|
||||
docker { networks { id name driver scope } }
|
||||
}
|
||||
""",
|
||||
"network_details": """
|
||||
query GetDockerNetwork {
|
||||
docker { networks { id name driver scope enableIPv6 internal attachable containers options labels } }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"start": """
|
||||
mutation StartContainer($id: PrefixedID!) {
|
||||
docker { start(id: $id) { id names state status } }
|
||||
}
|
||||
""",
|
||||
"stop": """
|
||||
mutation StopContainer($id: PrefixedID!) {
|
||||
docker { stop(id: $id) { id names state status } }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
DESTRUCTIVE_ACTIONS: set[str] = set()
|
||||
# NOTE (Code-M-07): "details" is listed here because it requires a container_id
|
||||
# parameter, but unlike mutations it uses fuzzy name matching (not strict).
|
||||
# This is intentional: read-only queries are safe with fuzzy matching.
|
||||
_ACTIONS_REQUIRING_CONTAINER_ID = {"start", "stop", "details"}
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) | {"restart"}
|
||||
|
||||
DOCKER_ACTIONS = Literal[
|
||||
"list",
|
||||
"details",
|
||||
"start",
|
||||
"stop",
|
||||
"restart",
|
||||
"networks",
|
||||
"network_details",
|
||||
]
|
||||
|
||||
if set(get_args(DOCKER_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(DOCKER_ACTIONS))
|
||||
_extra = set(get_args(DOCKER_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"DOCKER_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
# Full PrefixedID: 64 hex chars + optional suffix (e.g., ":local")
|
||||
_DOCKER_ID_PATTERN = re.compile(r"^[a-f0-9]{64}(:[a-z0-9]+)?$", re.IGNORECASE)
|
||||
|
||||
# Short hex prefix: at least 12 hex chars (standard Docker short ID length)
|
||||
_DOCKER_SHORT_ID_PATTERN = re.compile(r"^[a-f0-9]{12,63}$", re.IGNORECASE)
|
||||
|
||||
|
||||
def find_container_by_identifier(
|
||||
identifier: str, containers: list[dict[str, Any]], *, strict: bool = False
|
||||
) -> dict[str, Any] | None:
|
||||
"""Find a container by ID or name with optional fuzzy matching.
|
||||
|
||||
Match priority:
|
||||
1. Exact ID match
|
||||
2. Exact name match (case-sensitive)
|
||||
|
||||
When strict=False (default), also tries:
|
||||
3. Name starts with identifier (case-insensitive)
|
||||
4. Name contains identifier as substring (case-insensitive)
|
||||
|
||||
When strict=True, only exact matches (1 & 2) are used.
|
||||
Use strict=True for mutations to prevent targeting the wrong container.
|
||||
"""
|
||||
if not containers:
|
||||
return None
|
||||
|
||||
# Priority 1 & 2: exact matches
|
||||
for c in containers:
|
||||
if c.get("id") == identifier:
|
||||
return c
|
||||
if identifier in c.get("names", []):
|
||||
return c
|
||||
|
||||
# Strict mode: no fuzzy matching allowed
|
||||
if strict:
|
||||
return None
|
||||
|
||||
id_lower = identifier.lower()
|
||||
|
||||
# Priority 3: prefix match (more precise than substring)
|
||||
for c in containers:
|
||||
for name in c.get("names", []):
|
||||
if name.lower().startswith(id_lower):
|
||||
logger.debug(f"Prefix match: '{identifier}' -> '{name}'")
|
||||
return c
|
||||
|
||||
# Priority 4: substring match (least precise)
|
||||
for c in containers:
|
||||
for name in c.get("names", []):
|
||||
if id_lower in name.lower():
|
||||
logger.debug(f"Substring match: '{identifier}' -> '{name}'")
|
||||
return c
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_available_container_names(containers: list[dict[str, Any]]) -> list[str]:
|
||||
"""Extract all container names for error messages."""
|
||||
names: list[str] = []
|
||||
for c in containers:
|
||||
names.extend(c.get("names", []))
|
||||
return names
|
||||
|
||||
|
||||
async def _resolve_container_id(container_id: str, *, strict: bool = False) -> str:
|
||||
"""Resolve a container name/identifier to its actual PrefixedID.
|
||||
|
||||
Optimization: if the identifier is a full 64-char hex ID (with optional
|
||||
:suffix), skip the container list fetch entirely and use it directly.
|
||||
If it's a short hex prefix (12-63 chars), fetch the list and match by
|
||||
ID prefix. Only fetch the container list for name-based lookups.
|
||||
|
||||
Args:
|
||||
container_id: Container name or ID to resolve
|
||||
strict: When True, only exact name/ID matches are allowed (no fuzzy).
|
||||
Use for mutations to prevent targeting the wrong container.
|
||||
"""
|
||||
# Full PrefixedID: skip the list fetch entirely
|
||||
if _DOCKER_ID_PATTERN.match(container_id):
|
||||
return container_id
|
||||
|
||||
logger.info(f"Resolving container identifier '{container_id}' (strict={strict})")
|
||||
list_query = """
|
||||
query ResolveContainerID {
|
||||
docker { containers(skipCache: true) { id names } }
|
||||
}
|
||||
"""
|
||||
data = await make_graphql_request(list_query)
|
||||
containers = safe_get(data, "docker", "containers", default=[])
|
||||
|
||||
# Short hex prefix: match by ID prefix before trying name matching
|
||||
if _DOCKER_SHORT_ID_PATTERN.match(container_id):
|
||||
id_lower = container_id.lower()
|
||||
matches: list[dict[str, Any]] = []
|
||||
for c in containers:
|
||||
cid = (c.get("id") or "").lower()
|
||||
if cid.startswith(id_lower) or cid.split(":")[0].startswith(id_lower):
|
||||
matches.append(c)
|
||||
if len(matches) == 1:
|
||||
actual_id = str(matches[0].get("id", ""))
|
||||
logger.info(f"Resolved short ID '{container_id}' -> '{actual_id}'")
|
||||
return actual_id
|
||||
if len(matches) > 1:
|
||||
candidate_ids = [str(c.get("id", "")) for c in matches[:5]]
|
||||
raise ToolError(
|
||||
f"Short container ID prefix '{container_id}' is ambiguous. "
|
||||
f"Matches: {', '.join(candidate_ids)}. Use a longer ID or exact name."
|
||||
)
|
||||
|
||||
resolved = find_container_by_identifier(container_id, containers, strict=strict)
|
||||
if resolved:
|
||||
actual_id = str(resolved.get("id", ""))
|
||||
logger.info(f"Resolved '{container_id}' -> '{actual_id}'")
|
||||
return actual_id
|
||||
|
||||
available = get_available_container_names(containers)
|
||||
if strict:
|
||||
msg = (
|
||||
f"Container '{container_id}' not found by exact match. "
|
||||
f"Mutations require an exact container name or full ID — "
|
||||
f"fuzzy/substring matching is not allowed for safety."
|
||||
)
|
||||
else:
|
||||
msg = f"Container '{container_id}' not found."
|
||||
if available:
|
||||
msg += f" Available: {', '.join(available[:10])}"
|
||||
raise ToolError(msg)
|
||||
|
||||
|
||||
def register_docker_tool(mcp: FastMCP) -> None:
|
||||
"""Register the unraid_docker tool with the FastMCP instance."""
|
||||
|
||||
@mcp.tool()
|
||||
async def unraid_docker(
|
||||
action: DOCKER_ACTIONS,
|
||||
container_id: str | None = None,
|
||||
network_id: str | None = None,
|
||||
*,
|
||||
confirm: bool = False,
|
||||
) -> dict[str, Any]:
|
||||
"""Manage Docker containers and networks.
|
||||
|
||||
Actions:
|
||||
list - List all containers
|
||||
details - Detailed info for a container (requires container_id)
|
||||
start - Start a container (requires container_id)
|
||||
stop - Stop a container (requires container_id)
|
||||
restart - Stop then start a container (requires container_id)
|
||||
networks - List Docker networks
|
||||
network_details - Details of a network (requires network_id)
|
||||
"""
|
||||
if action not in ALL_ACTIONS:
|
||||
raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")
|
||||
|
||||
if action in _ACTIONS_REQUIRING_CONTAINER_ID and not container_id:
|
||||
raise ToolError(f"container_id is required for '{action}' action")
|
||||
|
||||
if action == "network_details" and not network_id:
|
||||
raise ToolError("network_id is required for 'network_details' action")
|
||||
|
||||
with tool_error_handler("docker", action, logger):
|
||||
logger.info(f"Executing unraid_docker action={action}")
|
||||
|
||||
# --- Read-only queries ---
|
||||
if action == "list":
|
||||
data = await make_graphql_request(QUERIES["list"])
|
||||
containers = safe_get(data, "docker", "containers", default=[])
|
||||
return {"containers": containers}
|
||||
|
||||
if action == "details":
|
||||
# Resolve name -> ID first (skips list fetch if already an ID)
|
||||
actual_id = await _resolve_container_id(container_id or "")
|
||||
data = await make_graphql_request(QUERIES["details"])
|
||||
containers = safe_get(data, "docker", "containers", default=[])
|
||||
# Match by resolved ID (exact match, no second list fetch needed)
|
||||
for c in containers:
|
||||
if c.get("id") == actual_id:
|
||||
return c
|
||||
raise ToolError(f"Container '{container_id}' not found in details response.")
|
||||
|
||||
if action == "networks":
|
||||
data = await make_graphql_request(QUERIES["networks"])
|
||||
networks = safe_get(data, "docker", "networks", default=[])
|
||||
return {"networks": networks}
|
||||
|
||||
if action == "network_details":
|
||||
data = await make_graphql_request(QUERIES["network_details"])
|
||||
all_networks = safe_get(data, "docker", "networks", default=[])
|
||||
# Filter client-side by network_id since the API returns all networks
|
||||
for net in all_networks:
|
||||
if net.get("id") == network_id or net.get("name") == network_id:
|
||||
return dict(net)
|
||||
raise ToolError(f"Network '{network_id}' not found.")
|
||||
|
||||
# --- Mutations (strict matching: no fuzzy/substring) ---
|
||||
if action == "restart":
|
||||
actual_id = await _resolve_container_id(container_id or "", strict=True)
|
||||
# Stop (idempotent: treat "already stopped" as success)
|
||||
stop_data = await make_graphql_request(
|
||||
MUTATIONS["stop"],
|
||||
{"id": actual_id},
|
||||
operation_context={"operation": "stop"},
|
||||
)
|
||||
stop_was_idempotent = stop_data.get("idempotent_success", False)
|
||||
# Start (idempotent: treat "already running" as success)
|
||||
start_data = await make_graphql_request(
|
||||
MUTATIONS["start"],
|
||||
{"id": actual_id},
|
||||
operation_context={"operation": "start"},
|
||||
)
|
||||
if start_data.get("idempotent_success"):
|
||||
result = {}
|
||||
else:
|
||||
result = safe_get(start_data, "docker", "start", default={})
|
||||
response: dict[str, Any] = {
|
||||
"success": True,
|
||||
"action": "restart",
|
||||
"container": result,
|
||||
}
|
||||
if stop_was_idempotent:
|
||||
response["note"] = "Container was already stopped before restart"
|
||||
return response
|
||||
|
||||
# Single-container mutations (start, stop)
|
||||
if action in MUTATIONS:
|
||||
actual_id = await _resolve_container_id(container_id or "", strict=True)
|
||||
op_context: dict[str, str] | None = (
|
||||
{"operation": action} if action in ("start", "stop") else None
|
||||
)
|
||||
data = await make_graphql_request(
|
||||
MUTATIONS[action],
|
||||
{"id": actual_id},
|
||||
operation_context=op_context,
|
||||
)
|
||||
|
||||
# Handle idempotent success
|
||||
if data.get("idempotent_success"):
|
||||
return {
|
||||
"success": True,
|
||||
"action": action,
|
||||
"idempotent": True,
|
||||
"message": f"Container already in desired state for '{action}'",
|
||||
}
|
||||
|
||||
docker_data = data.get("docker") or {}
|
||||
field = action
|
||||
result_container = docker_data.get(field)
|
||||
return {
|
||||
"success": True,
|
||||
"action": action,
|
||||
"container": result_container,
|
||||
}
|
||||
|
||||
raise ToolError(f"Unhandled action '{action}' — this is a bug")
|
||||
|
||||
logger.info("Docker tool registered successfully")
|
||||
@@ -1,275 +0,0 @@
|
||||
"""Health monitoring and diagnostics.
|
||||
|
||||
Provides the `unraid_health` tool with 4 actions for system health checks,
|
||||
connection testing, subscription diagnostics, and credential setup.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import time
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import Context, FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import (
|
||||
CREDENTIALS_ENV_PATH,
|
||||
UNRAID_API_URL,
|
||||
UNRAID_MCP_HOST,
|
||||
UNRAID_MCP_PORT,
|
||||
UNRAID_MCP_TRANSPORT,
|
||||
VERSION,
|
||||
)
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.setup import elicit_and_configure
|
||||
from ..core.utils import safe_display_url
|
||||
from ..subscriptions.utils import _analyze_subscription_status
|
||||
|
||||
|
||||
ALL_ACTIONS = {"check", "test_connection", "diagnose", "setup"}
|
||||
|
||||
HEALTH_ACTIONS = Literal["check", "test_connection", "diagnose", "setup"]
|
||||
|
||||
if set(get_args(HEALTH_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(HEALTH_ACTIONS))
|
||||
_extra = set(get_args(HEALTH_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
"HEALTH_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing in HEALTH_ACTIONS: {_missing}; extra in HEALTH_ACTIONS: {_extra}"
|
||||
)
|
||||
|
||||
# Severity ordering: only upgrade, never downgrade
|
||||
_SEVERITY = {"healthy": 0, "warning": 1, "degraded": 2, "unhealthy": 3}
|
||||
|
||||
|
||||
def _server_info() -> dict[str, Any]:
|
||||
"""Return the standard server info block used in health responses."""
|
||||
return {
|
||||
"name": "Unraid MCP Server",
|
||||
"version": VERSION,
|
||||
"transport": UNRAID_MCP_TRANSPORT,
|
||||
"host": UNRAID_MCP_HOST,
|
||||
"port": UNRAID_MCP_PORT,
|
||||
}
|
||||
|
||||
|
||||
def register_health_tool(mcp: FastMCP) -> None:
|
||||
"""Register the unraid_health tool with the FastMCP instance."""
|
||||
|
||||
@mcp.tool()
|
||||
async def unraid_health(
|
||||
action: HEALTH_ACTIONS,
|
||||
ctx: Context | None = None,
|
||||
) -> dict[str, Any] | str:
|
||||
"""Monitor Unraid MCP server and system health.
|
||||
|
||||
Actions:
|
||||
setup - Configure Unraid credentials via interactive elicitation
|
||||
check - Comprehensive health check (API latency, array, notifications, Docker)
|
||||
test_connection - Quick connectivity test (just checks { online })
|
||||
diagnose - Subscription system diagnostics
|
||||
"""
|
||||
if action not in ALL_ACTIONS:
|
||||
raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")
|
||||
|
||||
if action == "setup":
|
||||
|
||||
configured = await elicit_and_configure(ctx)
|
||||
if configured:
|
||||
return (
|
||||
"✅ Credentials configured successfully. You can now use all Unraid MCP tools."
|
||||
)
|
||||
return (
|
||||
f"⚠️ Credentials not configured.\n\n"
|
||||
f"Your MCP client may not support elicitation, or setup was cancelled.\n\n"
|
||||
f"**Manual setup** — create `{CREDENTIALS_ENV_PATH}` with:\n"
|
||||
f"```\n"
|
||||
f"UNRAID_API_URL=https://your-unraid-server:port\n"
|
||||
f"UNRAID_API_KEY=your-api-key\n"
|
||||
f"```\n\n"
|
||||
f"Then run any Unraid tool to connect."
|
||||
)
|
||||
|
||||
with tool_error_handler("health", action, logger):
|
||||
logger.info(f"Executing unraid_health action={action}")
|
||||
|
||||
if action == "test_connection":
|
||||
start = time.time()
|
||||
data = await make_graphql_request("query { online }")
|
||||
latency = round((time.time() - start) * 1000, 2)
|
||||
return {
|
||||
"status": "connected",
|
||||
"online": data.get("online"),
|
||||
"latency_ms": latency,
|
||||
}
|
||||
|
||||
if action == "check":
|
||||
return await _comprehensive_check()
|
||||
|
||||
if action == "diagnose":
|
||||
return await _diagnose_subscriptions()
|
||||
|
||||
raise ToolError(f"Unhandled action '{action}' — this is a bug")
|
||||
|
||||
logger.info("Health tool registered successfully")
|
||||
|
||||
|
||||
async def _comprehensive_check() -> dict[str, Any]:
|
||||
"""Run comprehensive health check against the Unraid system."""
|
||||
start_time = time.time()
|
||||
health_severity = 0 # Track as int to prevent downgrade
|
||||
issues: list[str] = []
|
||||
|
||||
def _escalate(level: str) -> None:
|
||||
nonlocal health_severity
|
||||
health_severity = max(health_severity, _SEVERITY.get(level, 0))
|
||||
|
||||
try:
|
||||
query = """
|
||||
query ComprehensiveHealthCheck {
|
||||
info {
|
||||
machineId time
|
||||
versions { core { unraid } }
|
||||
os { uptime }
|
||||
}
|
||||
array { state }
|
||||
notifications {
|
||||
overview { unread { alert warning total } }
|
||||
}
|
||||
docker {
|
||||
containers(skipCache: true) { id state status }
|
||||
}
|
||||
}
|
||||
"""
|
||||
data = await make_graphql_request(query)
|
||||
api_latency = round((time.time() - start_time) * 1000, 2)
|
||||
|
||||
health_info: dict[str, Any] = {
|
||||
"status": "healthy",
|
||||
"timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
|
||||
"api_latency_ms": api_latency,
|
||||
"server": _server_info(),
|
||||
}
|
||||
|
||||
if not data:
|
||||
health_info["status"] = "unhealthy"
|
||||
health_info["issues"] = ["No response from Unraid API"]
|
||||
return health_info
|
||||
|
||||
# System info
|
||||
info = data.get("info") or {}
|
||||
if info:
|
||||
health_info["unraid_system"] = {
|
||||
"status": "connected",
|
||||
"url": safe_display_url(UNRAID_API_URL),
|
||||
"machine_id": info.get("machineId"),
|
||||
"version": ((info.get("versions") or {}).get("core") or {}).get("unraid"),
|
||||
"uptime": (info.get("os") or {}).get("uptime"),
|
||||
}
|
||||
else:
|
||||
_escalate("degraded")
|
||||
issues.append("Unable to retrieve system info")
|
||||
|
||||
# Array
|
||||
array_info = data.get("array") or {}
|
||||
if array_info:
|
||||
state = array_info.get("state", "unknown")
|
||||
health_info["array_status"] = {
|
||||
"state": state,
|
||||
"healthy": state in ("STARTED", "STOPPED"),
|
||||
}
|
||||
if state not in ("STARTED", "STOPPED"):
|
||||
_escalate("warning")
|
||||
issues.append(f"Array in unexpected state: {state}")
|
||||
else:
|
||||
_escalate("warning")
|
||||
issues.append("Unable to retrieve array status")
|
||||
|
||||
# Notifications
|
||||
notifications = data.get("notifications") or {}
|
||||
if notifications and notifications.get("overview"):
|
||||
unread = notifications["overview"].get("unread") or {}
|
||||
alerts = unread.get("alert", 0)
|
||||
health_info["notifications"] = {
|
||||
"unread_total": unread.get("total", 0),
|
||||
"unread_alerts": alerts,
|
||||
"unread_warnings": unread.get("warning", 0),
|
||||
}
|
||||
if alerts > 0:
|
||||
_escalate("warning")
|
||||
issues.append(f"{alerts} unread alert(s)")
|
||||
|
||||
# Docker
|
||||
docker = data.get("docker") or {}
|
||||
if docker and docker.get("containers"):
|
||||
containers = docker["containers"]
|
||||
health_info["docker_services"] = {
|
||||
"total": len(containers),
|
||||
"running": len([c for c in containers if c.get("state") == "running"]),
|
||||
"stopped": len([c for c in containers if c.get("state") == "exited"]),
|
||||
}
|
||||
|
||||
# Latency assessment
|
||||
if api_latency > 10000:
|
||||
_escalate("degraded")
|
||||
issues.append(f"Very high API latency: {api_latency}ms")
|
||||
elif api_latency > 5000:
|
||||
_escalate("warning")
|
||||
issues.append(f"High API latency: {api_latency}ms")
|
||||
|
||||
# Resolve final status from severity level
|
||||
severity_to_status = {v: k for k, v in _SEVERITY.items()}
|
||||
health_info["status"] = severity_to_status.get(health_severity, "healthy")
|
||||
if issues:
|
||||
health_info["issues"] = issues
|
||||
health_info["performance"] = {
|
||||
"api_response_time_ms": api_latency,
|
||||
"check_duration_ms": round((time.time() - start_time) * 1000, 2),
|
||||
}
|
||||
|
||||
return health_info
|
||||
|
||||
except Exception as e:
|
||||
# Intentionally broad: health checks must always return a result,
|
||||
# even on unexpected failures, so callers never get an unhandled exception.
|
||||
logger.error(f"Health check failed: {e}", exc_info=True)
|
||||
return {
|
||||
"status": "unhealthy",
|
||||
"timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
|
||||
"error": str(e),
|
||||
"server": _server_info(),
|
||||
}
|
||||
|
||||
|
||||
async def _diagnose_subscriptions() -> dict[str, Any]:
    """Run subscription diagnostics and return a structured report.

    Lazily imports the subscription machinery (so the module loads even when
    subscriptions are unavailable), ensures the background subscriptions have
    been started, then summarizes their current status.

    Raises:
        ToolError: If the subscription modules cannot be imported, or if
            collecting the diagnostics fails for any other reason.
    """
    try:
        from ..subscriptions.manager import subscription_manager
        from ..subscriptions.resources import ensure_subscriptions_started

        await ensure_subscriptions_started()

        status = await subscription_manager.get_subscription_status()
        error_count, connection_issues = _analyze_subscription_status(status)

        # Build the report sections separately for readability.
        environment = {
            "auto_start_enabled": subscription_manager.auto_start_enabled,
            "max_reconnect_attempts": subscription_manager.max_reconnect_attempts,
            "api_url_configured": bool(UNRAID_API_URL),
        }
        summary = {
            "total_configured": len(subscription_manager.subscription_configs),
            "active_count": len(subscription_manager.active_subscriptions),
            "with_data": len(subscription_manager.resource_data),
            "in_error_state": error_count,
            "connection_issues": connection_issues,
        }
        return {
            "timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
            "environment": environment,
            "subscriptions": status,
            "summary": summary,
        }

    except ImportError as e:
        raise ToolError("Subscription modules not available") from e
    except Exception as e:
        raise ToolError(f"Failed to generate diagnostics: {e!s}") from e
|
||||
@@ -1,449 +0,0 @@
|
||||
"""System information and server status queries.
|
||||
|
||||
Provides the `unraid_info` tool with 19 read-only actions for retrieving
|
||||
system information, array status, network config, and server metadata.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.utils import format_kb
|
||||
|
||||
|
||||
# Pre-built queries keyed by action name
|
||||
QUERIES: dict[str, str] = {
|
||||
"overview": """
|
||||
query GetSystemInfo {
|
||||
info {
|
||||
os { platform distro release codename kernel arch hostname logofile serial build uptime }
|
||||
cpu { manufacturer brand vendor family model stepping revision voltage speed speedmin speedmax threads cores processors socket cache }
|
||||
memory {
|
||||
layout { bank type clockSpeed formFactor manufacturer partNum serialNum }
|
||||
}
|
||||
baseboard { manufacturer model version serial assetTag }
|
||||
system { manufacturer model version serial uuid sku }
|
||||
versions { core { unraid api kernel } packages { openssl node npm pm2 git nginx php docker } }
|
||||
machineId
|
||||
time
|
||||
}
|
||||
}
|
||||
""",
|
||||
"array": """
|
||||
query GetArrayStatus {
|
||||
array {
|
||||
id
|
||||
state
|
||||
capacity {
|
||||
kilobytes { free used total }
|
||||
disks { free used total }
|
||||
}
|
||||
boot { id idx name device size status rotational temp numReads numWrites numErrors fsSize fsFree fsUsed exportable type warning critical fsType comment format transport color }
|
||||
parities { id idx name device size status rotational temp numReads numWrites numErrors fsSize fsFree fsUsed exportable type warning critical fsType comment format transport color }
|
||||
disks { id idx name device size status rotational temp numReads numWrites numErrors fsSize fsFree fsUsed exportable type warning critical fsType comment format transport color }
|
||||
caches { id idx name device size status rotational temp numReads numWrites numErrors fsSize fsFree fsUsed exportable type warning critical fsType comment format transport color }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"network": """
|
||||
query GetNetworkInfo {
|
||||
servers { id name status wanip lanip localurl remoteurl }
|
||||
vars { id port portssl localTld useSsl }
|
||||
}
|
||||
""",
|
||||
"registration": """
|
||||
query GetRegistrationInfo {
|
||||
registration {
|
||||
id type
|
||||
keyFile { location }
|
||||
state expiration updateExpiration
|
||||
}
|
||||
}
|
||||
""",
|
||||
"connect": """
|
||||
query GetConnectSettings {
|
||||
connect { id dynamicRemoteAccess { enabledType runningType error } }
|
||||
}
|
||||
""",
|
||||
"variables": """
|
||||
query GetSelectiveUnraidVariables {
|
||||
vars {
|
||||
id version name timeZone comment security workgroup domain domainShort
|
||||
hideDotFiles localMaster enableFruit useNtp domainLogin sysModel
|
||||
sysFlashSlots useSsl port portssl localTld bindMgt useTelnet porttelnet
|
||||
useSsh portssh startPage startArray shutdownTimeout
|
||||
shareSmbEnabled shareNfsEnabled shareAfpEnabled shareCacheEnabled
|
||||
shareAvahiEnabled safeMode startMode configValid configError joinStatus
|
||||
deviceCount flashGuid flashProduct flashVendor mdState mdVersion
|
||||
shareCount shareSmbCount shareNfsCount shareAfpCount shareMoverActive
|
||||
}
|
||||
}
|
||||
""",
|
||||
"metrics": """
|
||||
query GetMetrics {
|
||||
metrics { cpu { percentTotal } memory { total used free available buffcache percentTotal } }
|
||||
}
|
||||
""",
|
||||
"services": """
|
||||
query GetServices {
|
||||
services { name online version }
|
||||
}
|
||||
""",
|
||||
"display": """
|
||||
query GetDisplay {
|
||||
info { display { theme } }
|
||||
}
|
||||
""",
|
||||
"config": """
|
||||
query GetConfig {
|
||||
config { valid error }
|
||||
}
|
||||
""",
|
||||
"online": """
|
||||
query GetOnline { online }
|
||||
""",
|
||||
"owner": """
|
||||
query GetOwner {
|
||||
owner { username avatar url }
|
||||
}
|
||||
""",
|
||||
"settings": """
|
||||
query GetSettings {
|
||||
settings { unified { values } }
|
||||
}
|
||||
""",
|
||||
"server": """
|
||||
query GetServer {
|
||||
info {
|
||||
os { hostname uptime }
|
||||
versions { core { unraid } }
|
||||
machineId time
|
||||
}
|
||||
array { state }
|
||||
online
|
||||
}
|
||||
""",
|
||||
"servers": """
|
||||
query GetServers {
|
||||
servers { id name status wanip lanip localurl remoteurl }
|
||||
}
|
||||
""",
|
||||
"flash": """
|
||||
query GetFlash {
|
||||
flash { id vendor product }
|
||||
}
|
||||
""",
|
||||
"ups_devices": """
|
||||
query GetUpsDevices {
|
||||
upsDevices { id name model status battery { chargeLevel estimatedRuntime health } power { loadPercentage inputVoltage outputVoltage } }
|
||||
}
|
||||
""",
|
||||
"ups_device": """
|
||||
query GetUpsDevice($id: String!) {
|
||||
upsDeviceById(id: $id) { id name model status battery { chargeLevel estimatedRuntime health } power { loadPercentage inputVoltage outputVoltage nominalPower currentPower } }
|
||||
}
|
||||
""",
|
||||
"ups_config": """
|
||||
query GetUpsConfig {
|
||||
upsConfiguration { service upsCable upsType device batteryLevel minutes timeout killUps upsName }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {}
|
||||
|
||||
DESTRUCTIVE_ACTIONS: set[str] = set()
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
|
||||
INFO_ACTIONS = Literal[
|
||||
"overview",
|
||||
"array",
|
||||
"network",
|
||||
"registration",
|
||||
"connect",
|
||||
"variables",
|
||||
"metrics",
|
||||
"services",
|
||||
"display",
|
||||
"config",
|
||||
"online",
|
||||
"owner",
|
||||
"settings",
|
||||
"server",
|
||||
"servers",
|
||||
"flash",
|
||||
"ups_devices",
|
||||
"ups_device",
|
||||
"ups_config",
|
||||
]
|
||||
|
||||
if set(get_args(INFO_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(INFO_ACTIONS))
|
||||
_extra = set(get_args(INFO_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"QUERIES keys and INFO_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Process raw system info into summary + details."""
|
||||
summary: dict[str, Any] = {}
|
||||
if raw_info.get("os"):
|
||||
os_info = raw_info["os"]
|
||||
summary["os"] = (
|
||||
f"{os_info.get('distro') or 'unknown'} {os_info.get('release') or 'unknown'} "
|
||||
f"({os_info.get('platform') or 'unknown'}, {os_info.get('arch') or 'unknown'})"
|
||||
)
|
||||
summary["hostname"] = os_info.get("hostname") or "unknown"
|
||||
summary["uptime"] = os_info.get("uptime")
|
||||
|
||||
if raw_info.get("cpu"):
|
||||
cpu = raw_info["cpu"]
|
||||
summary["cpu"] = (
|
||||
f"{cpu.get('manufacturer') or 'unknown'} {cpu.get('brand') or 'unknown'} "
|
||||
f"({cpu.get('cores') or '?'} cores, {cpu.get('threads') or '?'} threads)"
|
||||
)
|
||||
|
||||
if raw_info.get("memory") and raw_info["memory"].get("layout"):
|
||||
mem_layout = raw_info["memory"]["layout"]
|
||||
summary["memory_layout_details"] = []
|
||||
for stick in mem_layout:
|
||||
summary["memory_layout_details"].append(
|
||||
f"Bank {stick.get('bank') or '?'}: Type {stick.get('type') or '?'}, "
|
||||
f"Speed {stick.get('clockSpeed') or '?'}MHz, "
|
||||
f"Manufacturer: {stick.get('manufacturer') or '?'}, "
|
||||
f"Part: {stick.get('partNum') or '?'}"
|
||||
)
|
||||
summary["memory_summary"] = (
|
||||
"Stick layout details retrieved. Overall total/used/free memory stats "
|
||||
"are unavailable due to API limitations."
|
||||
)
|
||||
else:
|
||||
summary["memory_summary"] = "Memory information not available."
|
||||
|
||||
return {"summary": summary, "details": raw_info}
|
||||
|
||||
|
||||
def _analyze_disk_health(disks: list[dict[str, Any]]) -> dict[str, int]:
|
||||
"""Analyze health status of disk arrays."""
|
||||
counts = {
|
||||
"healthy": 0,
|
||||
"failed": 0,
|
||||
"missing": 0,
|
||||
"new": 0,
|
||||
"warning": 0,
|
||||
"critical": 0,
|
||||
"unknown": 0,
|
||||
}
|
||||
for disk in disks:
|
||||
status = disk.get("status", "").upper()
|
||||
warning = disk.get("warning")
|
||||
critical = disk.get("critical")
|
||||
if status == "DISK_OK":
|
||||
if critical:
|
||||
counts["critical"] += 1
|
||||
elif warning:
|
||||
counts["warning"] += 1
|
||||
else:
|
||||
counts["healthy"] += 1
|
||||
elif status in ("DISK_DSBL", "DISK_INVALID"):
|
||||
counts["failed"] += 1
|
||||
elif status == "DISK_NP":
|
||||
counts["missing"] += 1
|
||||
elif status == "DISK_NEW":
|
||||
counts["new"] += 1
|
||||
else:
|
||||
counts["unknown"] += 1
|
||||
return counts
|
||||
|
||||
|
||||
def _process_array_status(raw: dict[str, Any]) -> dict[str, Any]:
    """Condense raw array data into a summary plus the original details.

    Args:
        raw: The ``array`` payload from the GraphQL array query.

    Returns:
        A dict with a ``summary`` (state, capacity, disk counts, per-group
        health tallies, overall health verdict) and the raw payload under
        ``details``.
    """
    summary: dict[str, Any] = {"state": raw.get("state")}

    capacity = raw.get("capacity") or {}
    kb = capacity.get("kilobytes")
    if kb:
        summary["capacity_total"] = format_kb(kb.get("total"))
        summary["capacity_used"] = format_kb(kb.get("used"))
        summary["capacity_free"] = format_kb(kb.get("free"))

    summary["num_parity_disks"] = len(raw.get("parities", []))
    summary["num_data_disks"] = len(raw.get("disks", []))
    summary["num_cache_pools"] = len(raw.get("caches", []))

    # Per-group health tallies (only for groups present in the payload).
    health_summary: dict[str, Any] = {}
    for key, label in (
        ("parities", "parity_health"),
        ("disks", "data_health"),
        ("caches", "cache_health"),
    ):
        group = raw.get(key)
        if group:
            health_summary[label] = _analyze_disk_health(group)

    def _total(field: str) -> int:
        # Sum one counter across all tallied groups.
        return sum(h.get(field, 0) for h in health_summary.values())

    # Severity order: failed/critical > missing > warning > healthy.
    if _total("failed") or _total("critical"):
        overall = "CRITICAL"
    elif _total("missing"):
        overall = "DEGRADED"
    elif _total("warning"):
        overall = "WARNING"
    else:
        overall = "HEALTHY"

    summary["overall_health"] = overall
    summary["health_summary"] = health_summary

    return {"summary": summary, "details": raw}
|
||||
|
||||
|
||||
def register_info_tool(mcp: FastMCP) -> None:
    """Register the unraid_info tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_info(
        action: INFO_ACTIONS,
        device_id: str | None = None,
    ) -> dict[str, Any]:
        """Query Unraid system information.

        Actions:
            overview     - OS, CPU, memory, baseboard, versions
            array        - Array state, capacity, disk health
            network      - Access URLs, interfaces
            registration - License type, state, expiration
            connect      - Unraid Connect settings
            variables    - System variables and configuration
            metrics      - CPU and memory utilization
            services     - Running services
            display      - Theme settings
            config       - Configuration validity
            online       - Server online status
            owner        - Server owner info
            settings     - All unified settings
            server       - Quick server summary
            servers      - Connected servers list
            flash        - Flash drive info
            ups_devices  - List UPS devices
            ups_device   - Single UPS device (requires device_id)
            ups_config   - UPS configuration
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        # ups_device is the only action that takes a variable.
        if action == "ups_device" and not device_id:
            raise ToolError("device_id is required for ups_device action")

        # connect is not available on all Unraid API versions
        if action == "connect":
            raise ToolError(
                "The 'connect' query is not available on this Unraid API version. "
                "Use the 'settings' action for API and SSO configuration."
            )

        query = QUERIES[action]
        variables: dict[str, Any] | None = None
        if action == "ups_device":
            variables = {"id": device_id}

        # Lookup tables for common response patterns
        # Simple dict actions: action -> GraphQL response key
        dict_actions: dict[str, str] = {
            "registration": "registration",
            "variables": "vars",
            "metrics": "metrics",
            "config": "config",
            "owner": "owner",
            "flash": "flash",
            "ups_device": "upsDeviceById",
            "ups_config": "upsConfiguration",
        }
        # List-wrapped actions: action -> (GraphQL response key, output key)
        list_actions: dict[str, tuple[str, str]] = {
            "services": ("services", "services"),
            "servers": ("servers", "servers"),
            "ups_devices": ("upsDevices", "ups_devices"),
        }

        with tool_error_handler("info", action, logger):
            logger.info(f"Executing unraid_info action={action}")
            data = await make_graphql_request(query, variables)

            # Special-case actions with custom processing
            if action == "overview":
                raw = data.get("info") or {}
                if not raw:
                    raise ToolError("No system info returned from Unraid API")
                return _process_system_info(raw)

            if action == "array":
                raw = data.get("array") or {}
                if not raw:
                    raise ToolError("No array information returned from Unraid API")
                return _process_array_status(raw)

            if action == "display":
                info = data.get("info") or {}
                return dict(info.get("display") or {})

            if action == "online":
                return {"online": data.get("online")}

            if action == "settings":
                # Unwrap settings.unified.values; anything else is treated as
                # an API contract violation and surfaced as an error.
                settings = data.get("settings") or {}
                if not settings:
                    raise ToolError(
                        "No settings data returned from Unraid API. Check API permissions."
                    )
                if not settings.get("unified"):
                    logger.warning(f"Settings returned unexpected structure: {settings.keys()}")
                    raise ToolError(
                        f"Unexpected settings structure. Expected 'unified' key, got: {list(settings.keys())}"
                    )
                values = settings["unified"].get("values") or {}
                return dict(values) if isinstance(values, dict) else {"raw": values}

            if action == "server":
                # The combined query already matches the desired output shape.
                return data

            if action == "network":
                # Flatten per-server LAN/WAN addresses into one access-URL list.
                servers_data = data.get("servers") or []
                vars_data = data.get("vars") or {}
                access_urls = []
                for srv in servers_data:
                    if srv.get("lanip"):
                        access_urls.append(
                            {"type": "LAN", "ipv4": srv["lanip"], "url": srv.get("localurl")}
                        )
                    if srv.get("wanip"):
                        access_urls.append(
                            {"type": "WAN", "ipv4": srv["wanip"], "url": srv.get("remoteurl")}
                        )
                return {
                    "accessUrls": access_urls,
                    "httpPort": vars_data.get("port"),
                    "httpsPort": vars_data.get("portssl"),
                    "localTld": vars_data.get("localTld"),
                    "useSsl": vars_data.get("useSsl"),
                }

            # Simple dict-returning actions
            if action in dict_actions:
                return dict(data.get(dict_actions[action]) or {})

            # List-wrapped actions
            if action in list_actions:
                response_key, output_key = list_actions[action]
                items = data.get(response_key) or []
                normalized_items = list(items) if isinstance(items, list) else []
                return {output_key: normalized_items}

            # Unreachable when the lookup tables cover ALL_ACTIONS; kept as a
            # guard against a query being added without a response handler.
            raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Info tool registered successfully")
|
||||
@@ -1,203 +0,0 @@
|
||||
"""API key management.
|
||||
|
||||
Provides the `unraid_keys` tool with 5 actions for listing, viewing,
|
||||
creating, updating, and deleting API keys.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import Context, FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.setup import elicit_destructive_confirmation
|
||||
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"list": """
|
||||
query ListApiKeys {
|
||||
apiKeys { id name roles permissions { resource actions } createdAt }
|
||||
}
|
||||
""",
|
||||
"get": """
|
||||
query GetApiKey($id: PrefixedID!) {
|
||||
apiKey(id: $id) { id name roles permissions { resource actions } createdAt }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"create": """
|
||||
mutation CreateApiKey($input: CreateApiKeyInput!) {
|
||||
apiKey { create(input: $input) { id name key roles } }
|
||||
}
|
||||
""",
|
||||
"update": """
|
||||
mutation UpdateApiKey($input: UpdateApiKeyInput!) {
|
||||
apiKey { update(input: $input) { id name roles } }
|
||||
}
|
||||
""",
|
||||
"delete": """
|
||||
mutation DeleteApiKey($input: DeleteApiKeyInput!) {
|
||||
apiKey { delete(input: $input) }
|
||||
}
|
||||
""",
|
||||
"add_role": """
|
||||
mutation AddRole($input: AddRoleForApiKeyInput!) {
|
||||
apiKey { addRole(input: $input) }
|
||||
}
|
||||
""",
|
||||
"remove_role": """
|
||||
mutation RemoveRole($input: RemoveRoleFromApiKeyInput!) {
|
||||
apiKey { removeRole(input: $input) }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
DESTRUCTIVE_ACTIONS = {"delete"}
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
|
||||
KEY_ACTIONS = Literal[
|
||||
"add_role",
|
||||
"create",
|
||||
"delete",
|
||||
"get",
|
||||
"list",
|
||||
"remove_role",
|
||||
"update",
|
||||
]
|
||||
|
||||
if set(get_args(KEY_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(KEY_ACTIONS))
|
||||
_extra = set(get_args(KEY_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"KEY_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_keys_tool(mcp: FastMCP) -> None:
    """Register the unraid_keys tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_keys(
        action: KEY_ACTIONS,
        ctx: Context | None = None,
        confirm: bool = False,
        key_id: str | None = None,
        name: str | None = None,
        roles: list[str] | None = None,
        permissions: list[str] | None = None,
    ) -> dict[str, Any]:
        """Manage Unraid API keys.

        Actions:
            list        - List all API keys
            get         - Get a specific API key (requires key_id)
            create      - Create a new API key (requires name; optional roles, permissions)
            update      - Update an API key (requires key_id; optional name, roles)
            delete      - Delete API keys (requires key_id, confirm=True)
            add_role    - Add a role to an API key (requires key_id and roles)
            remove_role - Remove a role from an API key (requires key_id and roles)
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        # Destructive actions require either confirm=True or an interactive
        # confirmation via elicitation.
        if action in DESTRUCTIVE_ACTIONS and not confirm:
            _desc = f"Delete API key **{key_id}**. Any clients using this key will lose access."
            confirmed = await elicit_destructive_confirmation(ctx, action, _desc)
            if not confirmed:
                raise ToolError(
                    f"Action '{action}' was not confirmed. "
                    "Re-run with confirm=True to bypass elicitation."
                )

        with tool_error_handler("keys", action, logger):
            logger.info(f"Executing unraid_keys action={action}")

            if action == "list":
                data = await make_graphql_request(QUERIES["list"])
                keys = data.get("apiKeys", [])
                return {"keys": list(keys) if isinstance(keys, list) else []}

            if action == "get":
                if not key_id:
                    raise ToolError("key_id is required for 'get' action")
                data = await make_graphql_request(QUERIES["get"], {"id": key_id})
                return dict(data.get("apiKey") or {})

            if action == "create":
                if not name:
                    raise ToolError("name is required for 'create' action")
                create_input: dict[str, Any] = {"name": name}
                # Only include optional fields the caller actually supplied.
                if roles is not None:
                    create_input["roles"] = roles
                if permissions is not None:
                    create_input["permissions"] = permissions
                data = await make_graphql_request(MUTATIONS["create"], {"input": create_input})
                created_key = (data.get("apiKey") or {}).get("create")
                if not created_key:
                    raise ToolError("Failed to create API key: no data returned from server")
                return {"success": True, "key": created_key}

            if action == "update":
                if not key_id:
                    raise ToolError("key_id is required for 'update' action")
                update_input: dict[str, Any] = {"id": key_id}
                if name:
                    update_input["name"] = name
                if roles is not None:
                    update_input["roles"] = roles
                data = await make_graphql_request(MUTATIONS["update"], {"input": update_input})
                updated_key = (data.get("apiKey") or {}).get("update")
                if not updated_key:
                    raise ToolError("Failed to update API key: no data returned from server")
                return {"success": True, "key": updated_key}

            if action == "delete":
                if not key_id:
                    raise ToolError("key_id is required for 'delete' action")
                # The delete mutation takes a list of ids; we pass one.
                data = await make_graphql_request(MUTATIONS["delete"], {"input": {"ids": [key_id]}})
                result = (data.get("apiKey") or {}).get("delete")
                if not result:
                    raise ToolError(
                        f"Failed to delete API key '{key_id}': no confirmation from server"
                    )
                return {
                    "success": True,
                    "message": f"API key '{key_id}' deleted",
                }

            if action == "add_role":
                if not key_id:
                    raise ToolError("key_id is required for 'add_role' action")
                if not roles:
                    raise ToolError(
                        "role is required for 'add_role' action (pass as roles=['ROLE_NAME'])"
                    )
                # The mutation accepts a single role; only roles[0] is sent.
                await make_graphql_request(
                    MUTATIONS["add_role"],
                    {"input": {"apiKeyId": key_id, "role": roles[0]}},
                )
                return {"success": True, "message": f"Role '{roles[0]}' added to key '{key_id}'"}

            if action == "remove_role":
                if not key_id:
                    raise ToolError("key_id is required for 'remove_role' action")
                if not roles:
                    raise ToolError(
                        "role is required for 'remove_role' action (pass as roles=['ROLE_NAME'])"
                    )
                # The mutation accepts a single role; only roles[0] is sent.
                await make_graphql_request(
                    MUTATIONS["remove_role"],
                    {"input": {"apiKeyId": key_id, "role": roles[0]}},
                )
                return {
                    "success": True,
                    "message": f"Role '{roles[0]}' removed from key '{key_id}'",
                }

            # Unreachable while the branches above cover ALL_ACTIONS.
            raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Keys tool registered successfully")
|
||||
@@ -1,142 +0,0 @@
|
||||
"""Real-time subscription snapshot tool.
|
||||
|
||||
Provides the `unraid_live` tool with 11 actions — one per GraphQL
|
||||
subscription. Each action opens a transient WebSocket, receives one event
|
||||
(or collects events for `collect_for` seconds), then closes.
|
||||
|
||||
Use `subscribe_once` actions for current-state reads (cpu, memory, array_state).
|
||||
Use `subscribe_collect` actions for event streams (notification_feed, log_tail).
|
||||
"""
|
||||
|
||||
import os
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..subscriptions.queries import COLLECT_ACTIONS, SNAPSHOT_ACTIONS
|
||||
from ..subscriptions.snapshot import subscribe_collect, subscribe_once
|
||||
|
||||
|
||||
_ALLOWED_LOG_PREFIXES = ("/var/log/", "/boot/logs/", "/mnt/")
|
||||
|
||||
ALL_LIVE_ACTIONS = set(SNAPSHOT_ACTIONS) | set(COLLECT_ACTIONS)
|
||||
|
||||
LIVE_ACTIONS = Literal[
|
||||
"array_state",
|
||||
"cpu",
|
||||
"cpu_telemetry",
|
||||
"log_tail",
|
||||
"memory",
|
||||
"notification_feed",
|
||||
"notifications_overview",
|
||||
"owner",
|
||||
"parity_progress",
|
||||
"server_status",
|
||||
"ups_status",
|
||||
]
|
||||
|
||||
if set(get_args(LIVE_ACTIONS)) != ALL_LIVE_ACTIONS:
|
||||
_missing = ALL_LIVE_ACTIONS - set(get_args(LIVE_ACTIONS))
|
||||
_extra = set(get_args(LIVE_ACTIONS)) - ALL_LIVE_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"LIVE_ACTIONS and ALL_LIVE_ACTIONS are out of sync. "
|
||||
f"Missing: {_missing or 'none'}. Extra: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_live_tool(mcp: FastMCP) -> None:
    """Register the unraid_live tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_live(
        action: LIVE_ACTIONS,
        path: str | None = None,
        collect_for: float = 5.0,
        timeout: float = 10.0,  # noqa: ASYNC109
    ) -> dict[str, Any]:
        """Get real-time data from Unraid via WebSocket subscriptions.

        Each action opens a transient WebSocket, receives data, then closes.

        Snapshot actions (return current state):
            cpu                    - Real-time CPU utilization (all cores)
            memory                 - Real-time memory and swap utilization
            cpu_telemetry          - CPU power draw and temperature per package
            array_state            - Live array state and parity status
            parity_progress        - Live parity check progress
            ups_status             - Real-time UPS battery and power state
            notifications_overview - Live notification counts by severity
            owner                  - Live owner info
            server_status          - Live server connection state

        Collection actions (collect events for `collect_for` seconds):
            notification_feed - Collect new notification events (default: 5s window)
            log_tail          - Tail a log file (requires path; default: 5s window)

        Parameters:
            path        - Log file path for log_tail action (required)
            collect_for - Seconds to collect events for collect actions (default: 5.0)
            timeout     - WebSocket connection/handshake timeout in seconds (default: 10.0)
        """
        if action not in ALL_LIVE_ACTIONS:
            raise ToolError(
                f"Invalid action '{action}'. Must be one of: {sorted(ALL_LIVE_ACTIONS)}"
            )

        # Validate log_tail path before entering the error handler context.
        if action == "log_tail":
            if not path:
                raise ToolError("path is required for 'log_tail' action")
            # Resolve to prevent path traversal attacks (same as storage.py).
            # Using os.path.realpath instead of anyio.Path.resolve() because the
            # async variant blocks on NFS-mounted paths under /mnt/ (Perf-AI-1).
            normalized = os.path.realpath(path)  # noqa: ASYNC240
            # Only allow tailing files under the known log/mount roots.
            if not any(normalized.startswith(p) for p in _ALLOWED_LOG_PREFIXES):
                raise ToolError(
                    f"path must start with one of: {', '.join(_ALLOWED_LOG_PREFIXES)}. Got: {path!r}"
                )
            # Use the resolved path for the subscription (and in the response).
            path = normalized

        with tool_error_handler("live", action, logger):
            logger.info(f"Executing unraid_live action={action} timeout={timeout}")

            # Snapshot actions: single event, then close.
            if action in SNAPSHOT_ACTIONS:
                data = await subscribe_once(SNAPSHOT_ACTIONS[action], timeout=timeout)
                return {"success": True, "action": action, "data": data}

            # Collect actions
            if action == "log_tail":
                events = await subscribe_collect(
                    COLLECT_ACTIONS["log_tail"],
                    variables={"path": path},
                    collect_for=collect_for,
                    timeout=timeout,
                )
                return {
                    "success": True,
                    "action": action,
                    "path": path,
                    "collect_for": collect_for,
                    "event_count": len(events),
                    "events": events,
                }

            if action == "notification_feed":
                events = await subscribe_collect(
                    COLLECT_ACTIONS["notification_feed"],
                    collect_for=collect_for,
                    timeout=timeout,
                )
                return {
                    "success": True,
                    "action": action,
                    "collect_for": collect_for,
                    "event_count": len(events),
                    "events": events,
                }

            # Unreachable while SNAPSHOT_ACTIONS + the two collect branches
            # cover ALL_LIVE_ACTIONS (enforced by the module-level sync check).
            raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Live tool registered successfully")
|
||||
@@ -1,301 +0,0 @@
|
||||
"""Notification management.
|
||||
|
||||
Provides the `unraid_notifications` tool with 13 actions for viewing,
|
||||
creating, archiving, and deleting system notifications.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"overview": """
|
||||
query GetNotificationsOverview {
|
||||
notifications {
|
||||
overview {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
}
|
||||
""",
|
||||
"list": """
|
||||
query ListNotifications($filter: NotificationFilter!) {
|
||||
notifications {
|
||||
list(filter: $filter) {
|
||||
id title subject description importance link type timestamp formattedTimestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"create": """
|
||||
mutation CreateNotification($input: NotificationData!) {
|
||||
createNotification(input: $input) { id title importance }
|
||||
}
|
||||
""",
|
||||
"archive": """
|
||||
mutation ArchiveNotification($id: PrefixedID!) {
|
||||
archiveNotification(id: $id) { id title importance }
|
||||
}
|
||||
""",
|
||||
"unread": """
|
||||
mutation UnreadNotification($id: PrefixedID!) {
|
||||
unreadNotification(id: $id) { id title importance }
|
||||
}
|
||||
""",
|
||||
"delete": """
|
||||
mutation DeleteNotification($id: PrefixedID!, $type: NotificationType!) {
|
||||
deleteNotification(id: $id, type: $type) {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"delete_archived": """
|
||||
mutation DeleteArchivedNotifications {
|
||||
deleteArchivedNotifications {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"archive_all": """
|
||||
mutation ArchiveAllNotifications($importance: NotificationImportance) {
|
||||
archiveAll(importance: $importance) {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"archive_many": """
|
||||
mutation ArchiveNotifications($ids: [PrefixedID!]!) {
|
||||
archiveNotifications(ids: $ids) {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"unarchive_many": """
|
||||
mutation UnarchiveNotifications($ids: [PrefixedID!]!) {
|
||||
unarchiveNotifications(ids: $ids) {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"unarchive_all": """
|
||||
mutation UnarchiveAll($importance: NotificationImportance) {
|
||||
unarchiveAll(importance: $importance) {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
"recalculate": """
|
||||
mutation RecalculateOverview {
|
||||
recalculateOverview {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
DESTRUCTIVE_ACTIONS = {"delete", "delete_archived"}
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
_VALID_IMPORTANCE = {"ALERT", "WARNING", "INFO"}
|
||||
|
||||
NOTIFICATION_ACTIONS = Literal[
|
||||
"overview",
|
||||
"list",
|
||||
"create",
|
||||
"archive",
|
||||
"unread",
|
||||
"delete",
|
||||
"delete_archived",
|
||||
"archive_all",
|
||||
"archive_many",
|
||||
"unarchive_many",
|
||||
"unarchive_all",
|
||||
"recalculate",
|
||||
]
|
||||
|
||||
if set(get_args(NOTIFICATION_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(NOTIFICATION_ACTIONS))
|
||||
_extra = set(get_args(NOTIFICATION_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"NOTIFICATION_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_notifications_tool(mcp: FastMCP) -> None:
    """Register the unraid_notifications tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_notifications(
        action: NOTIFICATION_ACTIONS,
        confirm: bool = False,
        notification_id: str | None = None,
        notification_ids: list[str] | None = None,
        notification_type: str | None = None,
        importance: str | None = None,
        offset: int = 0,
        limit: int = 20,
        list_type: str = "UNREAD",
        title: str | None = None,
        subject: str | None = None,
        description: str | None = None,
    ) -> dict[str, Any]:
        """Manage Unraid system notifications.

        Actions:
            overview - Notification counts by severity (unread/archive)
            list - List notifications with filtering (list_type=UNREAD/ARCHIVE, importance=INFO/WARNING/ALERT)
            create - Create notification (requires title, subject, description, importance)
            archive - Archive a notification (requires notification_id)
            unread - Mark notification as unread (requires notification_id)
            delete - Delete a notification (requires notification_id, notification_type, confirm=True)
            delete_archived - Delete all archived notifications (requires confirm=True)
            archive_all - Archive all notifications (optional importance filter)
            archive_many - Archive multiple notifications by ID (requires notification_ids)
            unarchive_many - Move notifications back to unread (requires notification_ids)
            unarchive_all - Move all archived notifications to unread (optional importance filter)
            recalculate - Recompute overview counts from disk
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        if action in DESTRUCTIVE_ACTIONS and not confirm:
            raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.")

        # Validate enum parameters before dispatching to GraphQL (SEC-M04).
        # Invalid values waste a rate-limited request and may leak schema details in errors.
        valid_list_types = frozenset({"UNREAD", "ARCHIVE"})
        valid_importance = frozenset({"INFO", "WARNING", "ALERT"})
        valid_notif_types = frozenset({"UNREAD", "ARCHIVE"})

        if list_type.upper() not in valid_list_types:
            raise ToolError(
                f"Invalid list_type '{list_type}'. Must be one of: {sorted(valid_list_types)}"
            )
        if importance is not None and importance.upper() not in valid_importance:
            raise ToolError(
                f"Invalid importance '{importance}'. Must be one of: {sorted(valid_importance)}"
            )
        if notification_type is not None and notification_type.upper() not in valid_notif_types:
            raise ToolError(
                f"Invalid notification_type '{notification_type}'. "
                f"Must be one of: {sorted(valid_notif_types)}"
            )

        with tool_error_handler("notifications", action, logger):
            logger.info(f"Executing unraid_notifications action={action}")

            if action == "overview":
                data = await make_graphql_request(QUERIES["overview"])
                notifications = data.get("notifications") or {}
                return dict(notifications.get("overview") or {})

            if action == "list":
                # Build the NotificationFilter input; importance is optional.
                list_filter: dict[str, Any] = {
                    "type": list_type.upper(),
                    "offset": offset,
                    "limit": limit,
                }
                if importance:
                    list_filter["importance"] = importance.upper()
                data = await make_graphql_request(QUERIES["list"], {"filter": list_filter})
                notifications = data.get("notifications", {})
                return {"notifications": notifications.get("list", [])}

            if action == "create":
                if title is None or subject is None or description is None or importance is None:
                    raise ToolError("create requires title, subject, description, and importance")
                if importance.upper() not in _VALID_IMPORTANCE:
                    raise ToolError(
                        f"importance must be one of: {', '.join(sorted(_VALID_IMPORTANCE))}. "
                        f"Got: '{importance}'"
                    )
                # Length caps keep payloads within what the server accepts.
                if len(title) > 200:
                    raise ToolError(f"title must be at most 200 characters (got {len(title)})")
                if len(subject) > 500:
                    raise ToolError(f"subject must be at most 500 characters (got {len(subject)})")
                if len(description) > 2000:
                    raise ToolError(
                        f"description must be at most 2000 characters (got {len(description)})"
                    )
                payload = {
                    "title": title,
                    "subject": subject,
                    "description": description,
                    "importance": importance.upper(),
                }
                data = await make_graphql_request(MUTATIONS["create"], {"input": payload})
                created = data.get("createNotification")
                if created is None:
                    raise ToolError("Notification creation failed: server returned no data")
                return {"success": True, "notification": created}

            if action in ("archive", "unread"):
                if not notification_id:
                    raise ToolError(f"notification_id is required for '{action}' action")
                data = await make_graphql_request(MUTATIONS[action], {"id": notification_id})
                return {"success": True, "action": action, "data": data}

            if action == "delete":
                if not notification_id or not notification_type:
                    raise ToolError("delete requires notification_id and notification_type")
                delete_vars = {"id": notification_id, "type": notification_type.upper()}
                data = await make_graphql_request(MUTATIONS["delete"], delete_vars)
                return {"success": True, "action": "delete", "data": data}

            if action == "delete_archived":
                data = await make_graphql_request(MUTATIONS["delete_archived"])
                return {"success": True, "action": "delete_archived", "data": data}

            if action == "archive_all":
                archive_vars: dict[str, Any] | None = None
                if importance:
                    archive_vars = {"importance": importance.upper()}
                data = await make_graphql_request(MUTATIONS["archive_all"], archive_vars)
                return {"success": True, "action": "archive_all", "data": data}

            if action == "archive_many":
                if not notification_ids:
                    raise ToolError("notification_ids is required for 'archive_many' action")
                data = await make_graphql_request(
                    MUTATIONS["archive_many"], {"ids": notification_ids}
                )
                return {"success": True, "action": "archive_many", "data": data}

            if action == "unarchive_many":
                if not notification_ids:
                    raise ToolError("notification_ids is required for 'unarchive_many' action")
                data = await make_graphql_request(
                    MUTATIONS["unarchive_many"], {"ids": notification_ids}
                )
                return {"success": True, "action": "unarchive_many", "data": data}

            if action == "unarchive_all":
                unarchive_vars: dict[str, Any] | None = None
                if importance:
                    unarchive_vars = {"importance": importance.upper()}
                data = await make_graphql_request(MUTATIONS["unarchive_all"], unarchive_vars)
                return {"success": True, "action": "unarchive_all", "data": data}

            if action == "recalculate":
                data = await make_graphql_request(MUTATIONS["recalculate"])
                return {"success": True, "action": "recalculate", "data": data}

        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Notifications tool registered successfully")
|
||||
@@ -1,115 +0,0 @@
|
||||
"""OIDC/SSO provider management and session validation.
|
||||
|
||||
Provides the `unraid_oidc` tool with 5 read-only actions for querying
|
||||
OIDC provider configuration and validating sessions.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
# GraphQL documents for OIDC queries (this tool is read-only), keyed by action name.
QUERIES: dict[str, str] = {
    "providers": """
        query GetOidcProviders {
          oidcProviders {
            id name clientId issuer authorizationEndpoint tokenEndpoint jwksUri
            scopes authorizationRules { claim operator value }
            authorizationRuleMode buttonText buttonIcon buttonVariant buttonStyle
          }
        }
    """,
    "provider": """
        query GetOidcProvider($id: PrefixedID!) {
          oidcProvider(id: $id) {
            id name clientId issuer scopes
            authorizationRules { claim operator value }
            authorizationRuleMode buttonText buttonIcon
          }
        }
    """,
    "configuration": """
        query GetOidcConfiguration {
          oidcConfiguration {
            providers { id name clientId scopes }
            defaultAllowedOrigins
          }
        }
    """,
    "public_providers": """
        query GetPublicOidcProviders {
          publicOidcProviders { id name buttonText buttonIcon buttonVariant buttonStyle }
        }
    """,
    "validate_session": """
        query ValidateOidcSession($token: String!) {
          validateOidcSession(token: $token) { valid username }
        }
    """,
}

# Every dispatchable action (queries only — no mutations in this tool).
ALL_ACTIONS = set(QUERIES)

# Literal type mirroring ALL_ACTIONS so MCP clients get schema-level validation.
OIDC_ACTIONS = Literal[
    "configuration",
    "provider",
    "providers",
    "public_providers",
    "validate_session",
]

# Import-time guard: fail fast if the Literal drifts from the dict keys.
if set(get_args(OIDC_ACTIONS)) != ALL_ACTIONS:
    _missing = ALL_ACTIONS - set(get_args(OIDC_ACTIONS))
    _extra = set(get_args(OIDC_ACTIONS)) - ALL_ACTIONS
    raise RuntimeError(
        f"OIDC_ACTIONS and ALL_ACTIONS are out of sync. "
        f"Missing: {_missing or 'none'}. Extra: {_extra or 'none'}"
    )
|
||||
|
||||
|
||||
def register_oidc_tool(mcp: FastMCP) -> None:
    """Register the unraid_oidc tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_oidc(
        action: OIDC_ACTIONS,
        provider_id: str | None = None,
        token: str | None = None,
    ) -> dict[str, Any]:
        """Query Unraid OIDC/SSO provider configuration and validate sessions.

        Actions:
            providers - List all configured OIDC providers (admin only)
            provider - Get a specific OIDC provider by ID (requires provider_id)
            configuration - Get full OIDC configuration including default origins (admin only)
            public_providers - Get public OIDC provider info for login buttons (no auth)
            validate_session - Validate an OIDC session token (requires token)
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        if action == "provider" and not provider_id:
            raise ToolError("provider_id is required for 'provider' action")

        if action == "validate_session" and not token:
            raise ToolError("token is required for 'validate_session' action")

        with tool_error_handler("oidc", action, logger):
            logger.info(f"Executing unraid_oidc action={action}")

            # Only two actions carry GraphQL variables; the rest run bare.
            query_vars: dict[str, Any] | None = None
            if action == "provider":
                query_vars = {"id": provider_id}
            elif action == "validate_session":
                query_vars = {"token": token}

            data = await make_graphql_request(QUERIES[action], query_vars)
            return {"success": True, "action": action, "data": data}

    logger.info("OIDC tool registered successfully")
|
||||
@@ -1,111 +0,0 @@
|
||||
"""Plugin management for the Unraid API.
|
||||
|
||||
Provides the `unraid_plugins` tool with 3 actions: list, add, remove.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import Context, FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.setup import elicit_destructive_confirmation
|
||||
|
||||
|
||||
# GraphQL documents for plugin queries, keyed by action name.
QUERIES: dict[str, str] = {
    "list": """
        query ListPlugins {
          plugins { name version hasApiModule hasCliModule }
        }
    """,
}

# GraphQL documents for plugin mutations, keyed by action name.
MUTATIONS: dict[str, str] = {
    "add": """
        mutation AddPlugin($input: PluginManagementInput!) {
          addPlugin(input: $input)
        }
    """,
    "remove": """
        mutation RemovePlugin($input: PluginManagementInput!) {
          removePlugin(input: $input)
        }
    """,
}

# Actions that require confirmation (elicitation or confirm=True).
DESTRUCTIVE_ACTIONS = {"remove"}
# Every dispatchable action: union of query and mutation keys.
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)

# Literal type mirroring ALL_ACTIONS so MCP clients get schema-level validation.
PLUGIN_ACTIONS = Literal["add", "list", "remove"]

# Import-time guard: fail fast if the Literal drifts from the dict keys.
if set(get_args(PLUGIN_ACTIONS)) != ALL_ACTIONS:
    _missing = ALL_ACTIONS - set(get_args(PLUGIN_ACTIONS))
    _extra = set(get_args(PLUGIN_ACTIONS)) - ALL_ACTIONS
    raise RuntimeError(
        f"PLUGIN_ACTIONS and ALL_ACTIONS are out of sync. "
        f"Missing: {_missing or 'none'}. Extra: {_extra or 'none'}"
    )
|
||||
|
||||
|
||||
def register_plugins_tool(mcp: FastMCP) -> None:
    """Register the unraid_plugins tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_plugins(
        action: PLUGIN_ACTIONS,
        ctx: Context | None = None,
        confirm: bool = False,
        names: list[str] | None = None,
        bundled: bool = False,
        restart: bool = True,
    ) -> dict[str, Any]:
        """Manage Unraid API plugins.

        Actions:
            list - List all installed plugins with version and module info
            add - Install one or more plugins (requires names: list of package names)
            remove - Remove one or more plugins (requires names, confirm=True)

        Parameters:
            names - List of plugin package names (required for add/remove)
            bundled - Whether plugins are bundled (default: False)
            restart - Whether to auto-restart API after operation (default: True)
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        # Destructive actions go through interactive elicitation unless the
        # caller pre-confirmed with confirm=True.
        if action in DESTRUCTIVE_ACTIONS and not confirm:
            _desc = f"Remove plugin(s) **{names}** from the Unraid API. This cannot be undone without re-installing."
            confirmed = await elicit_destructive_confirmation(ctx, action, _desc)
            if not confirmed:
                raise ToolError(
                    f"Action '{action}' was not confirmed. "
                    "Re-run with confirm=True to bypass elicitation."
                )

        with tool_error_handler("plugins", action, logger):
            logger.info(f"Executing unraid_plugins action={action}")

            if action == "list":
                data = await make_graphql_request(QUERIES["list"])
                return {"success": True, "action": action, "data": data}

            if action in ("add", "remove"):
                if not names:
                    raise ToolError(f"names is required for '{action}' action")
                input_data = {"names": names, "bundled": bundled, "restart": restart}
                # `action` is itself the MUTATIONS key here ("add"/"remove"), so no
                # separate mutation_key ternary is needed.
                data = await make_graphql_request(MUTATIONS[action], {"input": input_data})
                # Mutation result field mirrors the action name: addPlugin / removePlugin.
                # It reports whether a manual API restart is still required.
                restart_required = data.get(f"{action}Plugin")
                return {
                    "success": True,
                    "action": action,
                    "names": names,
                    "manual_restart_required": restart_required,
                }

        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Plugins tool registered successfully")
|
||||
@@ -1,191 +0,0 @@
|
||||
"""RClone cloud storage remote management.
|
||||
|
||||
Provides the `unraid_rclone` tool with 4 actions for managing
|
||||
cloud storage remotes (S3, Google Drive, Dropbox, FTP, etc.).
|
||||
"""
|
||||
|
||||
import re
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
# GraphQL documents for rclone queries, keyed by action name.
QUERIES: dict[str, str] = {
    "list_remotes": """
        query ListRCloneRemotes {
          rclone { remotes { name type parameters config } }
        }
    """,
    "config_form": """
        query GetRCloneConfigForm($formOptions: RCloneConfigFormInput) {
          rclone { configForm(formOptions: $formOptions) { id dataSchema uiSchema } }
        }
    """,
}

# GraphQL documents for rclone mutations, keyed by action name.
MUTATIONS: dict[str, str] = {
    "create_remote": """
        mutation CreateRCloneRemote($input: CreateRCloneRemoteInput!) {
          rclone { createRCloneRemote(input: $input) { name type parameters } }
        }
    """,
    "delete_remote": """
        mutation DeleteRCloneRemote($input: DeleteRCloneRemoteInput!) {
          rclone { deleteRCloneRemote(input: $input) }
        }
    """,
}

# Actions that require confirm=True before they are executed.
DESTRUCTIVE_ACTIONS = {"delete_remote"}
# Every dispatchable action: union of query and mutation keys.
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)

# Literal type mirroring ALL_ACTIONS so MCP clients get schema-level validation.
RCLONE_ACTIONS = Literal[
    "list_remotes",
    "config_form",
    "create_remote",
    "delete_remote",
]

# Import-time guard: fail fast if the Literal drifts from the dict keys.
if set(get_args(RCLONE_ACTIONS)) != ALL_ACTIONS:
    _missing = ALL_ACTIONS - set(get_args(RCLONE_ACTIONS))
    _extra = set(get_args(RCLONE_ACTIONS)) - ALL_ACTIONS
    raise RuntimeError(
        f"RCLONE_ACTIONS and ALL_ACTIONS are out of sync. "
        f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
    )
|
||||
|
||||
# Max config entries to prevent abuse
|
||||
_MAX_CONFIG_KEYS = 50
|
||||
# Pattern for suspicious key names (path traversal, shell metacharacters)
|
||||
_DANGEROUS_KEY_PATTERN = re.compile(r"\.\.|[/\\;|`$(){}]")
|
||||
# Max length for individual config values
|
||||
_MAX_VALUE_LENGTH = 4096
|
||||
|
||||
|
||||
def _validate_config_data(config_data: dict[str, Any]) -> dict[str, str]:
|
||||
"""Validate and sanitize rclone config_data before passing to GraphQL.
|
||||
|
||||
Ensures all keys and values are safe strings with no injection vectors.
|
||||
|
||||
Raises:
|
||||
ToolError: If config_data contains invalid keys or values
|
||||
"""
|
||||
if len(config_data) > _MAX_CONFIG_KEYS:
|
||||
raise ToolError(f"config_data has {len(config_data)} keys (max {_MAX_CONFIG_KEYS})")
|
||||
|
||||
validated: dict[str, str] = {}
|
||||
for key, value in config_data.items():
|
||||
if not isinstance(key, str) or not key.strip():
|
||||
raise ToolError(
|
||||
f"config_data keys must be non-empty strings, got: {type(key).__name__}"
|
||||
)
|
||||
if _DANGEROUS_KEY_PATTERN.search(key):
|
||||
raise ToolError(
|
||||
f"config_data key '{key}' contains disallowed characters "
|
||||
f"(path traversal or shell metacharacters)"
|
||||
)
|
||||
if not isinstance(value, (str, int, float, bool)):
|
||||
raise ToolError(
|
||||
f"config_data['{key}'] must be a string, number, or boolean, "
|
||||
f"got: {type(value).__name__}"
|
||||
)
|
||||
str_value = str(value)
|
||||
if len(str_value) > _MAX_VALUE_LENGTH:
|
||||
raise ToolError(
|
||||
f"config_data['{key}'] value exceeds max length "
|
||||
f"({len(str_value)} > {_MAX_VALUE_LENGTH})"
|
||||
)
|
||||
validated[key] = str_value
|
||||
|
||||
return validated
|
||||
|
||||
|
||||
def register_rclone_tool(mcp: FastMCP) -> None:
    """Register the unraid_rclone tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_rclone(
        action: RCLONE_ACTIONS,
        confirm: bool = False,
        name: str | None = None,
        provider_type: str | None = None,
        config_data: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Manage RClone cloud storage remotes.

        Actions:
            list_remotes - List all configured remotes
            config_form - Get config form schema (optional provider_type for specific provider)
            create_remote - Create a new remote (requires name, provider_type, config_data)
            delete_remote - Delete a remote (requires name, confirm=True)
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        if action in DESTRUCTIVE_ACTIONS and not confirm:
            raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.")

        with tool_error_handler("rclone", action, logger):
            logger.info(f"Executing unraid_rclone action={action}")

            if action == "list_remotes":
                data = await make_graphql_request(QUERIES["list_remotes"])
                remotes = data.get("rclone", {}).get("remotes", [])
                # Defensive: only expose a list even if the server misbehaves.
                return {"remotes": list(remotes) if isinstance(remotes, list) else []}

            if action == "config_form":
                form_vars: dict[str, Any] = {}
                if provider_type:
                    form_vars["formOptions"] = {"providerType": provider_type}
                data = await make_graphql_request(QUERIES["config_form"], form_vars or None)
                form = data.get("rclone", {}).get("configForm", {})
                if not form:
                    raise ToolError("No RClone config form data received")
                return dict(form)

            if action == "create_remote":
                if name is None or provider_type is None or config_data is None:
                    raise ToolError("create_remote requires name, provider_type, and config_data")
                # Sanitize user-supplied parameters before they reach GraphQL.
                safe_params = _validate_config_data(config_data)
                data = await make_graphql_request(
                    MUTATIONS["create_remote"],
                    {
                        "input": {
                            "name": name,
                            "type": provider_type,
                            "parameters": safe_params,
                        }
                    },
                )
                remote = data.get("rclone", {}).get("createRCloneRemote")
                if not remote:
                    raise ToolError(
                        f"Failed to create remote '{name}': no confirmation from server"
                    )
                return {
                    "success": True,
                    "message": f"Remote '{name}' created successfully",
                    "remote": remote,
                }

            if action == "delete_remote":
                if not name:
                    raise ToolError("name is required for 'delete_remote' action")
                data = await make_graphql_request(
                    MUTATIONS["delete_remote"], {"input": {"name": name}}
                )
                deleted = data.get("rclone", {}).get("deleteRCloneRemote", False)
                if not deleted:
                    raise ToolError(f"Failed to delete remote '{name}'")
                return {
                    "success": True,
                    "message": f"Remote '{name}' deleted successfully",
                }

        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("RClone tool registered successfully")
|
||||
@@ -1,93 +0,0 @@
|
||||
"""System settings and UPS mutations.
|
||||
|
||||
Provides the `unraid_settings` tool with 2 actions for updating system
|
||||
configuration and UPS monitoring.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
# GraphQL documents for settings mutations, keyed by action name
# (this tool has no read-only queries).
MUTATIONS: dict[str, str] = {
    "update": """
        mutation UpdateSettings($input: JSON!) {
          updateSettings(input: $input) { restartRequired values warnings }
        }
    """,
    "configure_ups": """
        mutation ConfigureUps($config: UPSConfigInput!) {
          configureUps(config: $config)
        }
    """,
}

# Actions that require confirm=True before they are executed.
DESTRUCTIVE_ACTIONS = {
    "configure_ups",
}
# Every dispatchable action.
ALL_ACTIONS = set(MUTATIONS)

# Literal type mirroring ALL_ACTIONS so MCP clients get schema-level validation.
SETTINGS_ACTIONS = Literal[
    "configure_ups",
    "update",
]

# Import-time guard: fail fast if the Literal drifts from the dict keys.
if set(get_args(SETTINGS_ACTIONS)) != ALL_ACTIONS:
    _missing = ALL_ACTIONS - set(get_args(SETTINGS_ACTIONS))
    _extra = set(get_args(SETTINGS_ACTIONS)) - ALL_ACTIONS
    raise RuntimeError(
        f"SETTINGS_ACTIONS and ALL_ACTIONS are out of sync. "
        f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
    )
|
||||
|
||||
|
||||
def register_settings_tool(mcp: FastMCP) -> None:
    """Register the unraid_settings tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_settings(
        action: SETTINGS_ACTIONS,
        confirm: bool = False,
        settings_input: dict[str, Any] | None = None,
        ups_config: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Update Unraid system settings and UPS configuration.

        Actions:
            update - Update system settings (requires settings_input dict)
            configure_ups - Configure UPS monitoring (requires ups_config dict, confirm=True)
        """
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        if action in DESTRUCTIVE_ACTIONS and not confirm:
            raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.")

        with tool_error_handler("settings", action, logger):
            logger.info(f"Executing unraid_settings action={action}")

            if action == "update":
                if settings_input is None:
                    raise ToolError("settings_input is required for 'update' action")
                data = await make_graphql_request(MUTATIONS["update"], {"input": settings_input})
                return {"success": True, "action": "update", "data": data.get("updateSettings")}

            if action == "configure_ups":
                if ups_config is None:
                    raise ToolError("ups_config is required for 'configure_ups' action")
                data = await make_graphql_request(
                    MUTATIONS["configure_ups"], {"config": ups_config}
                )
                return {
                    "success": True,
                    "action": "configure_ups",
                    "result": data.get("configureUps"),
                }

        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Settings tool registered successfully")
|
||||
@@ -1,207 +0,0 @@
|
||||
"""Storage and disk management.
|
||||
|
||||
Provides the `unraid_storage` tool with 6 actions for shares, physical disks,
|
||||
log files, and log content retrieval.
|
||||
"""
|
||||
|
||||
import os
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import DISK_TIMEOUT, make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
from ..core.utils import format_bytes
|
||||
|
||||
|
||||
_ALLOWED_LOG_PREFIXES = ("/var/log/", "/boot/logs/", "/mnt/")
|
||||
_MAX_TAIL_LINES = 10_000
|
||||
|
||||
QUERIES: dict[str, str] = {
|
||||
"shares": """
|
||||
query GetSharesInfo {
|
||||
shares {
|
||||
id name free used size include exclude cache nameOrig
|
||||
comment allocator splitLevel floor cow color luksStatus
|
||||
}
|
||||
}
|
||||
""",
|
||||
"disks": """
|
||||
query ListPhysicalDisks {
|
||||
disks { id device name }
|
||||
}
|
||||
""",
|
||||
"disk_details": """
|
||||
query GetDiskDetails($id: PrefixedID!) {
|
||||
disk(id: $id) {
|
||||
id device name serialNum size temperature
|
||||
}
|
||||
}
|
||||
""",
|
||||
"log_files": """
|
||||
query ListLogFiles {
|
||||
logFiles { name path size modifiedAt }
|
||||
}
|
||||
""",
|
||||
"logs": """
|
||||
query GetLogContent($path: String!, $lines: Int) {
|
||||
logFile(path: $path, lines: $lines) {
|
||||
path content totalLines startLine
|
||||
}
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
MUTATIONS: dict[str, str] = {
|
||||
"flash_backup": """
|
||||
mutation InitiateFlashBackup($input: InitiateFlashBackupInput!) {
|
||||
initiateFlashBackup(input: $input) { status jobId }
|
||||
}
|
||||
""",
|
||||
}
|
||||
|
||||
DESTRUCTIVE_ACTIONS = {"flash_backup"}
|
||||
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)
|
||||
|
||||
STORAGE_ACTIONS = Literal[
|
||||
"shares",
|
||||
"disks",
|
||||
"disk_details",
|
||||
"log_files",
|
||||
"logs",
|
||||
"flash_backup",
|
||||
]
|
||||
|
||||
if set(get_args(STORAGE_ACTIONS)) != ALL_ACTIONS:
|
||||
_missing = ALL_ACTIONS - set(get_args(STORAGE_ACTIONS))
|
||||
_extra = set(get_args(STORAGE_ACTIONS)) - ALL_ACTIONS
|
||||
raise RuntimeError(
|
||||
f"STORAGE_ACTIONS and ALL_ACTIONS are out of sync. "
|
||||
f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
def register_storage_tool(mcp: FastMCP) -> None:
    """Register the unraid_storage tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_storage(
        action: STORAGE_ACTIONS,
        disk_id: str | None = None,
        log_path: str | None = None,
        tail_lines: int = 100,
        confirm: bool = False,
        remote_name: str | None = None,
        source_path: str | None = None,
        destination_path: str | None = None,
        backup_options: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Manage Unraid storage, disks, and logs.

        Actions:
            shares - List all user shares with capacity info
            disks - List all physical disks
            disk_details - Detailed SMART info for a disk (requires disk_id)
            log_files - List available log files
            logs - Retrieve log content (requires log_path, optional tail_lines)
            flash_backup - Initiate flash backup via rclone (requires remote_name, source_path, destination_path, confirm=True)
        """
        # Runtime guard in addition to the Literal annotation; callers may
        # bypass static type checking.
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        if action in DESTRUCTIVE_ACTIONS and not confirm:
            raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.")

        if action == "disk_details" and not disk_id:
            raise ToolError("disk_id is required for 'disk_details' action")

        if action == "logs" and (tail_lines < 1 or tail_lines > _MAX_TAIL_LINES):
            raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}")

        if action == "logs":
            if not log_path:
                raise ToolError("log_path is required for 'logs' action")
            # Resolve path synchronously to prevent traversal attacks.
            # Using os.path.realpath instead of anyio.Path.resolve() because the
            # async variant blocks on NFS-mounted paths under /mnt/ (Perf-AI-1).
            normalized = os.path.realpath(log_path)  # noqa: ASYNC240
            # Reject any resolved path outside the allow-listed prefixes.
            if not any(normalized.startswith(p) for p in _ALLOWED_LOG_PREFIXES):
                raise ToolError(
                    f"log_path must start with one of: {', '.join(_ALLOWED_LOG_PREFIXES)}. "
                    f"Use log_files action to discover valid paths."
                )
            log_path = normalized

        if action == "flash_backup":
            if not remote_name:
                raise ToolError("remote_name is required for 'flash_backup' action")
            if not source_path:
                raise ToolError("source_path is required for 'flash_backup' action")
            if not destination_path:
                raise ToolError("destination_path is required for 'flash_backup' action")
            # Build the InitiateFlashBackupInput payload; options is optional.
            input_data: dict[str, Any] = {
                "remoteName": remote_name,
                "sourcePath": source_path,
                "destinationPath": destination_path,
            }
            if backup_options is not None:
                input_data["options"] = backup_options
            with tool_error_handler("storage", action, logger):
                logger.info("Executing unraid_storage action=flash_backup")
                data = await make_graphql_request(MUTATIONS["flash_backup"], {"input": input_data})
                backup = data.get("initiateFlashBackup")
                if not backup:
                    raise ToolError("Failed to start flash backup: no confirmation from server")
                return {
                    "success": True,
                    "action": "flash_backup",
                    "data": backup,
                }

        # All remaining actions are read-only GraphQL queries.
        query = QUERIES[action]
        variables: dict[str, Any] | None = None
        # Disk queries can spin up sleeping drives, so they get a longer timeout.
        custom_timeout = DISK_TIMEOUT if action in ("disks", "disk_details") else None

        if action == "disk_details":
            variables = {"id": disk_id}
        elif action == "logs":
            variables = {"path": log_path, "lines": tail_lines}

        with tool_error_handler("storage", action, logger):
            logger.info(f"Executing unraid_storage action={action}")
            data = await make_graphql_request(query, variables, custom_timeout=custom_timeout)

            if action == "shares":
                return {"shares": data.get("shares", [])}

            if action == "disks":
                return {"disks": data.get("disks", [])}

            if action == "disk_details":
                raw = data.get("disk", {})
                if not raw:
                    raise ToolError(f"Disk '{disk_id}' not found")
                # Human-friendly summary alongside the raw GraphQL payload.
                summary = {
                    "disk_id": raw.get("id"),
                    "device": raw.get("device"),
                    "name": raw.get("name"),
                    "serial_number": raw.get("serialNum"),
                    "size_formatted": format_bytes(raw.get("size")),
                    "temperature": (
                        f"{raw['temperature']}\u00b0C"
                        if raw.get("temperature") is not None
                        else "N/A"
                    ),
                }
                return {"summary": summary, "details": raw}

            if action == "log_files":
                return {"log_files": data.get("logFiles", [])}

            if action == "logs":
                return dict(data.get("logFile") or {})

        # Unreachable if the Literal/ALL_ACTIONS sync check holds.
        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("Storage tool registered successfully")
|
||||
1939
unraid_mcp/tools/unraid.py
Normal file
1939
unraid_mcp/tools/unraid.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,51 +0,0 @@
|
||||
"""User account query.
|
||||
|
||||
Provides the `unraid_users` tool with 1 action for querying the current authenticated user.
|
||||
Note: Unraid GraphQL API does not support user management operations (list, add, delete).
|
||||
"""
|
||||
|
||||
from typing import Any, Literal
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
# GraphQL query documents keyed by action name; this tool is read-only.
QUERIES: dict[str, str] = {
    "me": """
    query GetMe {
      me { id name description roles }
    }
    """,
}

# Every valid action name for this tool.
ALL_ACTIONS = set(QUERIES)

# Literal mirror of ALL_ACTIONS for the MCP schema.
USER_ACTIONS = Literal["me"]
|
||||
|
||||
|
||||
def register_users_tool(mcp: FastMCP) -> None:
    """Register the unraid_users tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_users(
        action: USER_ACTIONS = "me",
    ) -> dict[str, Any]:
        """Query current authenticated user.

        Actions:
            me - Get current authenticated user info (id, name, description, roles)

        Note: Unraid API does not support user management operations (list, add, delete).
        """
        # Runtime guard: reject anything outside the supported action set.
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        with tool_error_handler("users", action, logger):
            logger.info("Executing unraid_users action=me")
            response = await make_graphql_request(QUERIES["me"])
            # Normalize a missing/null `me` field to an empty dict.
            current_user = response.get("me")
            return current_user or {}

    logger.info("Users tool registered successfully")
|
||||
@@ -1,155 +0,0 @@
|
||||
"""Virtual machine management.
|
||||
|
||||
Provides the `unraid_vm` tool with 9 actions for VM lifecycle management
|
||||
including start, stop, pause, resume, force stop, reboot, and reset.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal, get_args
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..core.client import make_graphql_request
|
||||
from ..core.exceptions import ToolError, tool_error_handler
|
||||
|
||||
|
||||
# GraphQL query documents for VM read actions, keyed by action name.
QUERIES: dict[str, str] = {
    "list": """
    query ListVMs {
      vms { id domains { id name state uuid } }
    }
    """,
    # NOTE: The Unraid GraphQL API does not expose a single-VM query.
    # The details query is identical to list; client-side filtering is required.
    "details": """
    query ListVMs {
      vms { id domains { id name state uuid } }
    }
    """,
}
|
||||
|
||||
# GraphQL mutation documents for VM lifecycle actions, keyed by action name.
# Each takes the VM's PrefixedID and returns a boolean under vm.<field>.
MUTATIONS: dict[str, str] = {
    "start": """
    mutation StartVM($id: PrefixedID!) { vm { start(id: $id) } }
    """,
    "stop": """
    mutation StopVM($id: PrefixedID!) { vm { stop(id: $id) } }
    """,
    "pause": """
    mutation PauseVM($id: PrefixedID!) { vm { pause(id: $id) } }
    """,
    "resume": """
    mutation ResumeVM($id: PrefixedID!) { vm { resume(id: $id) } }
    """,
    "force_stop": """
    mutation ForceStopVM($id: PrefixedID!) { vm { forceStop(id: $id) } }
    """,
    "reboot": """
    mutation RebootVM($id: PrefixedID!) { vm { reboot(id: $id) } }
    """,
    "reset": """
    mutation ResetVM($id: PrefixedID!) { vm { reset(id: $id) } }
    """,
}
|
||||
|
||||
# Map action names to GraphQL field names (only where they differ)
_MUTATION_FIELDS: dict[str, str] = {
    "force_stop": "forceStop",
}

# Actions that require confirm=True before executing.
DESTRUCTIVE_ACTIONS = {"force_stop", "reset"}

# Literal type mirroring ALL_ACTIONS so the MCP schema advertises valid values.
VM_ACTIONS = Literal[
    "list",
    "details",
    "start",
    "stop",
    "pause",
    "resume",
    "force_stop",
    "reboot",
    "reset",
]

# Every valid action name: read-only queries plus mutations.
ALL_ACTIONS = set(QUERIES) | set(MUTATIONS)

# Fail fast at import time if the Literal drifts from the dict-derived set.
if set(get_args(VM_ACTIONS)) != ALL_ACTIONS:
    _missing = ALL_ACTIONS - set(get_args(VM_ACTIONS))
    _extra = set(get_args(VM_ACTIONS)) - ALL_ACTIONS
    raise RuntimeError(
        f"VM_ACTIONS and ALL_ACTIONS are out of sync. "
        f"Missing from Literal: {_missing or 'none'}. Extra in Literal: {_extra or 'none'}"
    )
|
||||
|
||||
|
||||
def register_vm_tool(mcp: FastMCP) -> None:
    """Register the unraid_vm tool with the FastMCP instance."""

    @mcp.tool()
    async def unraid_vm(
        action: VM_ACTIONS,
        vm_id: str | None = None,
        confirm: bool = False,
    ) -> dict[str, Any]:
        """Manage Unraid virtual machines.

        Actions:
            list - List all VMs with state
            details - Detailed info for a VM (requires vm_id: UUID, PrefixedID, or name)
            start - Start a VM (requires vm_id)
            stop - Gracefully stop a VM (requires vm_id)
            pause - Pause a VM (requires vm_id)
            resume - Resume a paused VM (requires vm_id)
            force_stop - Force stop a VM (requires vm_id, confirm=True)
            reboot - Reboot a VM (requires vm_id)
            reset - Reset a VM (requires vm_id, confirm=True)
        """
        # Runtime guard in addition to the Literal annotation.
        if action not in ALL_ACTIONS:
            raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}")

        # Every action except 'list' targets a specific VM.
        if action != "list" and not vm_id:
            raise ToolError(f"vm_id is required for '{action}' action")

        if action in DESTRUCTIVE_ACTIONS and not confirm:
            raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.")

        with tool_error_handler("vm", action, logger):
            logger.info(f"Executing unraid_vm action={action}")

            if action == "list":
                data = await make_graphql_request(QUERIES["list"])
                if data.get("vms"):
                    # Server may return "domains" (current schema) or legacy
                    # "domain"; either may be a single dict instead of a list.
                    vms = data["vms"].get("domains") or data["vms"].get("domain") or []
                    if isinstance(vms, dict):
                        vms = [vms]
                    return {"vms": vms}
                return {"vms": []}

            if action == "details":
                # No single-VM query exists; fetch all and filter client-side.
                data = await make_graphql_request(QUERIES["details"])
                if not data.get("vms"):
                    raise ToolError("No VM data returned from server")
                vms = data["vms"].get("domains") or data["vms"].get("domain") or []
                if isinstance(vms, dict):
                    vms = [vms]
                # Accept UUID, PrefixedID, or display name as the identifier.
                for vm in vms:
                    if vm.get("uuid") == vm_id or vm.get("id") == vm_id or vm.get("name") == vm_id:
                        return dict(vm)
                available = [f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms]
                raise ToolError(f"VM '{vm_id}' not found. Available: {', '.join(available)}")

            # Mutations
            if action in MUTATIONS:
                data = await make_graphql_request(MUTATIONS[action], {"id": vm_id})
                # GraphQL field name differs from the action name for force_stop.
                field = _MUTATION_FIELDS.get(action, action)
                if data.get("vm") and field in data["vm"]:
                    return {
                        "success": data["vm"][field],
                        "action": action,
                        "vm_id": vm_id,
                    }
                raise ToolError(f"Failed to {action} VM or unexpected response")

        # Unreachable if the Literal/ALL_ACTIONS sync check holds.
        raise ToolError(f"Unhandled action '{action}' — this is a bug")

    logger.info("VM tool registered successfully")
|
||||
140
uv.lock
generated
140
uv.lock
generated
@@ -187,59 +187,75 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "charset-normalizer"
|
||||
version = "3.4.5"
|
||||
version = "3.4.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1d/35/02daf95b9cd686320bb622eb148792655c9412dbb9b67abb5694e5910a24/charset_normalizer-3.4.5.tar.gz", hash = "sha256:95adae7b6c42a6c5b5b559b1a99149f090a57128155daeea91732c8d970d8644", size = 134804, upload-time = "2026-03-06T06:03:19.46Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7b/60/e3bec1881450851b087e301bedc3daa9377a4d45f1c26aa90b0b235e38aa/charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6", size = 143363, upload-time = "2026-03-15T18:53:25.478Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/b6/9ee9c1a608916ca5feae81a344dffbaa53b26b90be58cc2159e3332d44ec/charset_normalizer-3.4.5-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed97c282ee4f994ef814042423a529df9497e3c666dca19be1d4cd1129dc7ade", size = 280976, upload-time = "2026-03-06T06:01:15.276Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/d8/a54f7c0b96f1df3563e9190f04daf981e365a9b397eedfdfb5dbef7e5c6c/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0294916d6ccf2d069727d65973c3a1ca477d68708db25fd758dd28b0827cff54", size = 189356, upload-time = "2026-03-06T06:01:16.511Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/69/2bf7f76ce1446759a5787cb87d38f6a61eb47dbbdf035cfebf6347292a65/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:dc57a0baa3eeedd99fafaef7511b5a6ef4581494e8168ee086031744e2679467", size = 206369, upload-time = "2026-03-06T06:01:17.853Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/9c/949d1a46dab56b959d9a87272482195f1840b515a3380e39986989a893ae/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ed1a9a204f317ef879b32f9af507d47e49cd5e7f8e8d5d96358c98373314fc60", size = 203285, upload-time = "2026-03-06T06:01:19.473Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/5c/ae30362a88b4da237d71ea214a8c7eb915db3eec941adda511729ac25fa2/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ad83b8f9379176c841f8865884f3514d905bcd2a9a3b210eaa446e7d2223e4d", size = 196274, upload-time = "2026-03-06T06:01:20.728Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/07/c9f2cb0e46cb6d64fdcc4f95953747b843bb2181bda678dc4e699b8f0f9a/charset_normalizer-3.4.5-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:a118e2e0b5ae6b0120d5efa5f866e58f2bb826067a646431da4d6a2bdae7950e", size = 184715, upload-time = "2026-03-06T06:01:22.194Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/64/6b0ca95c44fddf692cd06d642b28f63009d0ce325fad6e9b2b4d0ef86a52/charset_normalizer-3.4.5-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:754f96058e61a5e22e91483f823e07df16416ce76afa4ebf306f8e1d1296d43f", size = 193426, upload-time = "2026-03-06T06:01:23.795Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/bc/a730690d726403743795ca3f5bb2baf67838c5fea78236098f324b965e40/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0c300cefd9b0970381a46394902cd18eaf2aa00163f999590ace991989dcd0fc", size = 191780, upload-time = "2026-03-06T06:01:25.053Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/4f/6c0bc9af68222b22951552d73df4532b5be6447cee32d58e7e8c74ecbb7b/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c108f8619e504140569ee7de3f97d234f0fbae338a7f9f360455071ef9855a95", size = 185805, upload-time = "2026-03-06T06:01:26.294Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/b9/a523fb9b0ee90814b503452b2600e4cbc118cd68714d57041564886e7325/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d1028de43596a315e2720a9849ee79007ab742c06ad8b45a50db8cdb7ed4a82a", size = 208342, upload-time = "2026-03-06T06:01:27.55Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/61/c59e761dee4464050713e50e27b58266cc8e209e518c0b378c1580c959ba/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:19092dde50335accf365cce21998a1c6dd8eafd42c7b226eb54b2747cdce2fac", size = 193661, upload-time = "2026-03-06T06:01:29.051Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/43/729fa30aad69783f755c5ad8649da17ee095311ca42024742701e202dc59/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4354e401eb6dab9aed3c7b4030514328a6c748d05e1c3e19175008ca7de84fb1", size = 204819, upload-time = "2026-03-06T06:01:30.298Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/87/33/d9b442ce5a91b96fc0840455a9e49a611bbadae6122778d0a6a79683dd31/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a68766a3c58fde7f9aaa22b3786276f62ab2f594efb02d0a1421b6282e852e98", size = 198080, upload-time = "2026-03-06T06:01:31.478Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/5a/b8b5a23134978ee9885cee2d6995f4c27cc41f9baded0a9685eabc5338f0/charset_normalizer-3.4.5-cp312-cp312-win32.whl", hash = "sha256:1827734a5b308b65ac54e86a618de66f935a4f63a8a462ff1e19a6788d6c2262", size = 132630, upload-time = "2026-03-06T06:01:33.056Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/53/e44a4c07e8904500aec95865dc3f6464dc3586a039ef0df606eb3ac38e35/charset_normalizer-3.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:728c6a963dfab66ef865f49286e45239384249672cd598576765acc2a640a636", size = 142856, upload-time = "2026-03-06T06:01:34.489Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/aa/c5628f7cad591b1cf45790b7a61483c3e36cf41349c98af7813c483fd6e8/charset_normalizer-3.4.5-cp312-cp312-win_arm64.whl", hash = "sha256:75dfd1afe0b1647449e852f4fb428195a7ed0588947218f7ba929f6538487f02", size = 132982, upload-time = "2026-03-06T06:01:35.641Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/48/9f34ec4bb24aa3fdba1890c1bddb97c8a4be1bd84ef5c42ac2352563ad05/charset_normalizer-3.4.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ac59c15e3f1465f722607800c68713f9fbc2f672b9eb649fe831da4019ae9b23", size = 280788, upload-time = "2026-03-06T06:01:37.126Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/09/6003e7ffeb90cc0560da893e3208396a44c210c5ee42efff539639def59b/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:165c7b21d19365464e8f70e5ce5e12524c58b48c78c1f5a57524603c1ab003f8", size = 188890, upload-time = "2026-03-06T06:01:38.73Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/1e/02706edf19e390680daa694d17e2b8eab4b5f7ac285e2a51168b4b22ee6b/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:28269983f25a4da0425743d0d257a2d6921ea7d9b83599d4039486ec5b9f911d", size = 206136, upload-time = "2026-03-06T06:01:40.016Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/87/942c3def1b37baf3cf786bad01249190f3ca3d5e63a84f831e704977de1f/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d27ce22ec453564770d29d03a9506d449efbb9fa13c00842262b2f6801c48cce", size = 202551, upload-time = "2026-03-06T06:01:41.522Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/0a/af49691938dfe175d71b8a929bd7e4ace2809c0c5134e28bc535660d5262/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0625665e4ebdddb553ab185de5db7054393af8879fb0c87bd5690d14379d6819", size = 195572, upload-time = "2026-03-06T06:01:43.208Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/ea/dfb1792a8050a8e694cfbde1570ff97ff74e48afd874152d38163d1df9ae/charset_normalizer-3.4.5-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:c23eb3263356d94858655b3e63f85ac5d50970c6e8febcdde7830209139cc37d", size = 184438, upload-time = "2026-03-06T06:01:44.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/12/c281e2067466e3ddd0595bfaea58a6946765ace5c72dfa3edc2f5f118026/charset_normalizer-3.4.5-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e6302ca4ae283deb0af68d2fbf467474b8b6aedcd3dab4db187e07f94c109763", size = 193035, upload-time = "2026-03-06T06:01:46.051Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/4f/3792c056e7708e10464bad0438a44708886fb8f92e3c3d29ec5e2d964d42/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e51ae7d81c825761d941962450f50d041db028b7278e7b08930b4541b3e45cb9", size = 191340, upload-time = "2026-03-06T06:01:47.547Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/86/80ddba897127b5c7a9bccc481b0cd36c8fefa485d113262f0fe4332f0bf4/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:597d10dec876923e5c59e48dbd366e852eacb2b806029491d307daea6b917d7c", size = 185464, upload-time = "2026-03-06T06:01:48.764Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/00/b5eff85ba198faacab83e0e4b6f0648155f072278e3b392a82478f8b988b/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5cffde4032a197bd3b42fd0b9509ec60fb70918d6970e4cc773f20fc9180ca67", size = 208014, upload-time = "2026-03-06T06:01:50.371Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/11/d36f70be01597fd30850dde8a1269ebc8efadd23ba5785808454f2389bde/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2da4eedcb6338e2321e831a0165759c0c620e37f8cd044a263ff67493be8ffb3", size = 193297, upload-time = "2026-03-06T06:01:51.933Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/1d/259eb0a53d4910536c7c2abb9cb25f4153548efb42800c6a9456764649c0/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:65a126fb4b070d05340a84fc709dd9e7c75d9b063b610ece8a60197a291d0adf", size = 204321, upload-time = "2026-03-06T06:01:53.887Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/31/faa6c5b9d3688715e1ed1bb9d124c384fe2fc1633a409e503ffe1c6398c1/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7a80a9242963416bd81f99349d5f3fce1843c303bd404f204918b6d75a75fd6", size = 197509, upload-time = "2026-03-06T06:01:56.439Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/a5/c7d9dd1503ffc08950b3260f5d39ec2366dd08254f0900ecbcf3a6197c7c/charset_normalizer-3.4.5-cp313-cp313-win32.whl", hash = "sha256:f1d725b754e967e648046f00c4facc42d414840f5ccc670c5670f59f83693e4f", size = 132284, upload-time = "2026-03-06T06:01:57.812Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/0f/57072b253af40c8aa6636e6de7d75985624c1eb392815b2f934199340a89/charset_normalizer-3.4.5-cp313-cp313-win_amd64.whl", hash = "sha256:e37bd100d2c5d3ba35db9c7c5ba5a9228cbcffe5c4778dc824b164e5257813d7", size = 142630, upload-time = "2026-03-06T06:01:59.062Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/41/1c4b7cc9f13bd9d369ce3bc993e13d374ce25fa38a2663644283ecf422c1/charset_normalizer-3.4.5-cp313-cp313-win_arm64.whl", hash = "sha256:93b3b2cc5cf1b8743660ce77a4f45f3f6d1172068207c1defc779a36eea6bb36", size = 133254, upload-time = "2026-03-06T06:02:00.281Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/be/0f0fd9bb4a7fa4fb5067fb7d9ac693d4e928d306f80a0d02bde43a7c4aee/charset_normalizer-3.4.5-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8197abe5ca1ffb7d91e78360f915eef5addff270f8a71c1fc5be24a56f3e4873", size = 280232, upload-time = "2026-03-06T06:02:01.508Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/28/02/983b5445e4bef49cd8c9da73a8e029f0825f39b74a06d201bfaa2e55142a/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2aecdb364b8a1802afdc7f9327d55dad5366bc97d8502d0f5854e50712dbc5f", size = 189688, upload-time = "2026-03-06T06:02:02.857Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/88/152745c5166437687028027dc080e2daed6fe11cfa95a22f4602591c42db/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a66aa5022bf81ab4b1bebfb009db4fd68e0c6d4307a1ce5ef6a26e5878dfc9e4", size = 206833, upload-time = "2026-03-06T06:02:05.127Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/0f/ebc15c8b02af2f19be9678d6eed115feeeccc45ce1f4b098d986c13e8769/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d77f97e515688bd615c1d1f795d540f32542d514242067adcb8ef532504cb9ee", size = 202879, upload-time = "2026-03-06T06:02:06.446Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/9c/71336bff6934418dc8d1e8a1644176ac9088068bc571da612767619c97b3/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01a1ed54b953303ca7e310fafe0fe347aab348bd81834a0bcd602eb538f89d66", size = 195764, upload-time = "2026-03-06T06:02:08.763Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/95/ce92fde4f98615661871bc282a856cf9b8a15f686ba0af012984660d480b/charset_normalizer-3.4.5-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:b2d37d78297b39a9eb9eb92c0f6df98c706467282055419df141389b23f93362", size = 183728, upload-time = "2026-03-06T06:02:10.137Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/e7/f5b4588d94e747ce45ae680f0f242bc2d98dbd4eccfab73e6160b6893893/charset_normalizer-3.4.5-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e71bbb595973622b817c042bd943c3f3667e9c9983ce3d205f973f486fec98a7", size = 192937, upload-time = "2026-03-06T06:02:11.663Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/29/9d94ed6b929bf9f48bf6ede6e7474576499f07c4c5e878fb186083622716/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4cd966c2559f501c6fd69294d082c2934c8dd4719deb32c22961a5ac6db0df1d", size = 192040, upload-time = "2026-03-06T06:02:13.489Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/d2/1a093a1cf827957f9445f2fe7298bcc16f8fc5e05c1ed2ad1af0b239035e/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d5e52d127045d6ae01a1e821acfad2f3a1866c54d0e837828538fabe8d9d1bd6", size = 184107, upload-time = "2026-03-06T06:02:14.83Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/7d/82068ce16bd36135df7b97f6333c5d808b94e01d4599a682e2337ed5fd14/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:30a2b1a48478c3428d047ed9690d57c23038dac838a87ad624c85c0a78ebeb39", size = 208310, upload-time = "2026-03-06T06:02:16.165Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/4e/4dfb52307bb6af4a5c9e73e482d171b81d36f522b21ccd28a49656baa680/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d8ed79b8f6372ca4254955005830fd61c1ccdd8c0fac6603e2c145c61dd95db6", size = 192918, upload-time = "2026-03-06T06:02:18.144Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/a4/159ff7da662cf7201502ca89980b8f06acf3e887b278956646a8aeb178ab/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:c5af897b45fa606b12464ccbe0014bbf8c09191e0a66aab6aa9d5cf6e77e0c94", size = 204615, upload-time = "2026-03-06T06:02:19.821Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/62/0dd6172203cb6b429ffffc9935001fde42e5250d57f07b0c28c6046deb6b/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1088345bcc93c58d8d8f3d783eca4a6e7a7752bbff26c3eee7e73c597c191c2e", size = 197784, upload-time = "2026-03-06T06:02:21.86Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/5e/1aab5cb737039b9c59e63627dc8bbc0d02562a14f831cc450e5f91d84ce1/charset_normalizer-3.4.5-cp314-cp314-win32.whl", hash = "sha256:ee57b926940ba00bca7ba7041e665cc956e55ef482f851b9b65acb20d867e7a2", size = 133009, upload-time = "2026-03-06T06:02:23.289Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/65/e7c6c77d7aaa4c0d7974f2e403e17f0ed2cb0fc135f77d686b916bf1eead/charset_normalizer-3.4.5-cp314-cp314-win_amd64.whl", hash = "sha256:4481e6da1830c8a1cc0b746b47f603b653dadb690bcd851d039ffaefe70533aa", size = 143511, upload-time = "2026-03-06T06:02:26.195Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/91/52b0841c71f152f563b8e072896c14e3d83b195c188b338d3cc2e582d1d4/charset_normalizer-3.4.5-cp314-cp314-win_arm64.whl", hash = "sha256:97ab7787092eb9b50fb47fa04f24c75b768a606af1bcba1957f07f128a7219e4", size = 133775, upload-time = "2026-03-06T06:02:27.473Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/60/3a621758945513adfd4db86827a5bafcc615f913dbd0b4c2ed64a65731be/charset_normalizer-3.4.5-py3-none-any.whl", hash = "sha256:9db5e3fcdcee89a78c04dffb3fe33c79f77bd741a624946db2591c81b2fc85b0", size = 55455, upload-time = "2026-03-06T06:03:17.827Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/62/c0815c992c9545347aeea7859b50dc9044d147e2e7278329c6e02ac9a616/charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab", size = 295154, upload-time = "2026-03-15T18:50:50.88Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/37/bdca6613c2e3c58c7421891d80cc3efa1d32e882f7c4a7ee6039c3fc951a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21", size = 199191, upload-time = "2026-03-15T18:50:52.658Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/92/9934d1bbd69f7f398b38c5dae1cbf9cc672e7c34a4adf7b17c0a9c17d15d/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2", size = 218674, upload-time = "2026-03-15T18:50:54.102Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/af/90/25f6ab406659286be929fd89ab0e78e38aa183fc374e03aa3c12d730af8a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff", size = 215259, upload-time = "2026-03-15T18:50:55.616Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/ef/79a463eb0fff7f96afa04c1d4c51f8fc85426f918db467854bfb6a569ce3/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5", size = 207276, upload-time = "2026-03-15T18:50:57.054Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/72/d0426afec4b71dc159fa6b4e68f868cd5a3ecd918fec5813a15d292a7d10/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0", size = 195161, upload-time = "2026-03-15T18:50:58.686Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/18/c82b06a68bfcb6ce55e508225d210c7e6a4ea122bfc0748892f3dc4e8e11/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a", size = 203452, upload-time = "2026-03-15T18:51:00.196Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/d6/0c25979b92f8adafdbb946160348d8d44aa60ce99afdc27df524379875cb/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2", size = 202272, upload-time = "2026-03-15T18:51:01.703Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/3d/7fea3e8fe84136bebbac715dd1221cc25c173c57a699c030ab9b8900cbb7/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5", size = 195622, upload-time = "2026-03-15T18:51:03.526Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/8a/d6f7fd5cb96c58ef2f681424fbca01264461336d2a7fc875e4446b1f1346/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6", size = 220056, upload-time = "2026-03-15T18:51:05.269Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/16/50/478cdda782c8c9c3fb5da3cc72dd7f331f031e7f1363a893cdd6ca0f8de0/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d", size = 203751, upload-time = "2026-03-15T18:51:06.858Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/fc/cc2fcac943939c8e4d8791abfa139f685e5150cae9f94b60f12520feaa9b/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2", size = 216563, upload-time = "2026-03-15T18:51:08.564Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/b7/a4add1d9a5f68f3d037261aecca83abdb0ab15960a3591d340e829b37298/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923", size = 209265, upload-time = "2026-03-15T18:51:10.312Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/18/c094561b5d64a24277707698e54b7f67bd17a4f857bbfbb1072bba07c8bf/charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4", size = 144229, upload-time = "2026-03-15T18:51:11.694Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/20/0567efb3a8fd481b8f34f739ebddc098ed062a59fed41a8d193a61939e8f/charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb", size = 154277, upload-time = "2026-03-15T18:51:13.004Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/57/28d79b44b51933119e21f65479d0864a8d5893e494cf5daab15df0247c17/charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4", size = 142817, upload-time = "2026-03-15T18:51:14.408Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/1d/4fdabeef4e231153b6ed7567602f3b68265ec4e5b76d6024cf647d43d981/charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f", size = 294823, upload-time = "2026-03-15T18:51:15.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/47/7b/20e809b89c69d37be748d98e84dce6820bf663cf19cf6b942c951a3e8f41/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843", size = 198527, upload-time = "2026-03-15T18:51:17.177Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/37/a6/4f8d27527d59c039dce6f7622593cdcd3d70a8504d87d09eb11e9fdc6062/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf", size = 218388, upload-time = "2026-03-15T18:51:18.934Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/9b/4770ccb3e491a9bacf1c46cc8b812214fe367c86a96353ccc6daf87b01ec/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8", size = 214563, upload-time = "2026-03-15T18:51:20.374Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/58/a199d245894b12db0b957d627516c78e055adc3a0d978bc7f65ddaf7c399/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9", size = 206587, upload-time = "2026-03-15T18:51:21.807Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/70/3def227f1ec56f5c69dfc8392b8bd63b11a18ca8178d9211d7cc5e5e4f27/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88", size = 194724, upload-time = "2026-03-15T18:51:23.508Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/58/ab/9318352e220c05efd31c2779a23b50969dc94b985a2efa643ed9077bfca5/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84", size = 202956, upload-time = "2026-03-15T18:51:25.239Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/13/f3550a3ac25b70f87ac98c40d3199a8503676c2f1620efbf8d42095cfc40/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd", size = 201923, upload-time = "2026-03-15T18:51:26.682Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/db/c5c643b912740b45e8eec21de1bbab8e7fc085944d37e1e709d3dcd9d72f/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c", size = 195366, upload-time = "2026-03-15T18:51:28.129Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/67/3b1c62744f9b2448443e0eb160d8b001c849ec3fef591e012eda6484787c/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194", size = 219752, upload-time = "2026-03-15T18:51:29.556Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/98/32ffbaf7f0366ffb0445930b87d103f6b406bc2c271563644bde8a2b1093/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc", size = 203296, upload-time = "2026-03-15T18:51:30.921Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/12/5d308c1bbe60cabb0c5ef511574a647067e2a1f631bc8634fcafaccd8293/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f", size = 215956, upload-time = "2026-03-15T18:51:32.399Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/e9/5f85f6c5e20669dbe56b165c67b0260547dea97dba7e187938833d791687/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2", size = 208652, upload-time = "2026-03-15T18:51:34.214Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/11/897052ea6af56df3eef3ca94edafee410ca699ca0c7b87960ad19932c55e/charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d", size = 143940, upload-time = "2026-03-15T18:51:36.15Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/5c/724b6b363603e419829f561c854b87ed7c7e31231a7908708ac086cdf3e2/charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389", size = 154101, upload-time = "2026-03-15T18:51:37.876Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/a5/7abf15b4c0968e47020f9ca0935fb3274deb87cb288cd187cad92e8cdffd/charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f", size = 143109, upload-time = "2026-03-15T18:51:39.565Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/6f/ffe1e1259f384594063ea1869bfb6be5cdb8bc81020fc36c3636bc8302a1/charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8", size = 294458, upload-time = "2026-03-15T18:51:41.134Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/60/09bb6c13a8c1016c2ed5c6a6488e4ffef506461aa5161662bd7636936fb1/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421", size = 199277, upload-time = "2026-03-15T18:51:42.953Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/50/dcfbb72a5138bbefdc3332e8d81a23494bf67998b4b100703fd15fa52d81/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2", size = 218758, upload-time = "2026-03-15T18:51:44.339Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/b3/d79a9a191bb75f5aa81f3aaaa387ef29ce7cb7a9e5074ba8ea095cc073c2/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30", size = 215299, upload-time = "2026-03-15T18:51:45.871Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/7e/bc8911719f7084f72fd545f647601ea3532363927f807d296a8c88a62c0d/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db", size = 206811, upload-time = "2026-03-15T18:51:47.308Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e2/40/c430b969d41dda0c465aa36cc7c2c068afb67177bef50905ac371b28ccc7/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8", size = 193706, upload-time = "2026-03-15T18:51:48.849Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/15/e35e0590af254f7df984de1323640ef375df5761f615b6225ba8deb9799a/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815", size = 202706, upload-time = "2026-03-15T18:51:50.257Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/bd/f736f7b9cc5e93a18b794a50346bb16fbfd6b37f99e8f306f7951d27c17c/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a", size = 202497, upload-time = "2026-03-15T18:51:52.012Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/ba/2cc9e3e7dfdf7760a6ed8da7446d22536f3d0ce114ac63dee2a5a3599e62/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43", size = 193511, upload-time = "2026-03-15T18:51:53.723Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/cb/5be49b5f776e5613be07298c80e1b02a2d900f7a7de807230595c85a8b2e/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0", size = 220133, upload-time = "2026-03-15T18:51:55.333Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/43/99f1b5dad345accb322c80c7821071554f791a95ee50c1c90041c157ae99/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1", size = 203035, upload-time = "2026-03-15T18:51:56.736Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/87/9a/62c2cb6a531483b55dddff1a68b3d891a8b498f3ca555fbcf2978e804d9d/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f", size = 216321, upload-time = "2026-03-15T18:51:58.17Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/79/94a010ff81e3aec7c293eb82c28f930918e517bc144c9906a060844462eb/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815", size = 208973, upload-time = "2026-03-15T18:51:59.998Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/57/4ecff6d4ec8585342f0c71bc03efaa99cb7468f7c91a57b105bcd561cea8/charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d", size = 144610, upload-time = "2026-03-15T18:52:02.213Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/94/8434a02d9d7f168c25767c64671fead8d599744a05d6a6c877144c754246/charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f", size = 154962, upload-time = "2026-03-15T18:52:03.658Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/4c/48f2cdbfd923026503dfd67ccea45c94fd8fe988d9056b468579c66ed62b/charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e", size = 143595, upload-time = "2026-03-15T18:52:05.123Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/93/8878be7569f87b14f1d52032946131bcb6ebbd8af3e20446bc04053dc3f1/charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866", size = 314828, upload-time = "2026-03-15T18:52:06.831Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/b6/fae511ca98aac69ecc35cde828b0a3d146325dd03d99655ad38fc2cc3293/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc", size = 208138, upload-time = "2026-03-15T18:52:08.239Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/57/64caf6e1bf07274a1e0b7c160a55ee9e8c9ec32c46846ce59b9c333f7008/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e", size = 224679, upload-time = "2026-03-15T18:52:10.043Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/aa/cb/9ff5a25b9273ef160861b41f6937f86fae18b0792fe0a8e75e06acb08f1d/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077", size = 223475, upload-time = "2026-03-15T18:52:11.854Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/97/440635fc093b8d7347502a377031f9605a1039c958f3cd18dcacffb37743/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f", size = 215230, upload-time = "2026-03-15T18:52:13.325Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/24/afff630feb571a13f07c8539fbb502d2ab494019492aaffc78ef41f1d1d0/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e", size = 199045, upload-time = "2026-03-15T18:52:14.752Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/17/d1399ecdaf7e0498c327433e7eefdd862b41236a7e484355b8e0e5ebd64b/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484", size = 211658, upload-time = "2026-03-15T18:52:16.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/38/16baa0affb957b3d880e5ac2144caf3f9d7de7bc4a91842e447fbb5e8b67/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7", size = 210769, upload-time = "2026-03-15T18:52:17.782Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/34/c531bc6ac4c21da9ddfddb3107be2287188b3ea4b53b70fc58f2a77ac8d8/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff", size = 201328, upload-time = "2026-03-15T18:52:19.553Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/73/a5a1e9ca5f234519c1953608a03fe109c306b97fdfb25f09182babad51a7/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e", size = 225302, upload-time = "2026-03-15T18:52:21.043Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/f6/cd782923d112d296294dea4bcc7af5a7ae0f86ab79f8fefbda5526b6cfc0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659", size = 211127, upload-time = "2026-03-15T18:52:22.491Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/c5/0b6898950627af7d6103a449b22320372c24c6feda91aa24e201a478d161/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602", size = 222840, upload-time = "2026-03-15T18:52:24.113Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/25/c4bba773bef442cbdc06111d40daa3de5050a676fa26e85090fc54dd12f0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407", size = 216890, upload-time = "2026-03-15T18:52:25.541Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/1a/05dacadb0978da72ee287b0143097db12f2e7e8d3ffc4647da07a383b0b7/charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579", size = 155379, upload-time = "2026-03-15T18:52:27.05Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/7a/d269d834cb3a76291651256f3b9a5945e81d0a49ab9f4a498964e83c0416/charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4", size = 169043, upload-time = "2026-03-15T18:52:28.502Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/06/28b29fba521a37a8932c6a84192175c34d49f84a6d4773fa63d05f9aff22/charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c", size = 148523, upload-time = "2026-03-15T18:52:29.956Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/68/687187c7e26cb24ccbd88e5069f5ef00eba804d36dde11d99aad0838ab45/charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69", size = 61455, upload-time = "2026-03-15T18:53:23.833Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -402,7 +418,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "cyclopts"
|
||||
version = "4.9.0"
|
||||
version = "4.10.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "attrs" },
|
||||
@@ -410,9 +426,9 @@ dependencies = [
|
||||
{ name = "rich" },
|
||||
{ name = "rich-rst" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/75/de/75598ddea1f47589ccecdb23a560715a5a8ec2b3e34396b5628ba98d70e4/cyclopts-4.9.0.tar.gz", hash = "sha256:f292868e4be33a3e622d8cf95d89f49222e987b1ccdbf40caf6514e19dd99a63", size = 166300, upload-time = "2026-03-13T13:43:40.38Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2c/e7/3e26855c046ac527cf94d890f6698e703980337f22ea7097e02b35b910f9/cyclopts-4.10.0.tar.gz", hash = "sha256:0ae04a53274e200ef3477c8b54de63b019bc6cd0162d75c718bf40c9c3fb5268", size = 166394, upload-time = "2026-03-14T14:09:31.043Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/b2/2e342a876e5b78ce99ecf65ce391f5b2935144a0528c9989c437b8578a54/cyclopts-4.9.0-py3-none-any.whl", hash = "sha256:583ea4090a040c92f9303bc0da26bca7b681c81bcea34097ace279e1acef22c1", size = 203999, upload-time = "2026-03-13T13:43:38.553Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/06/d68a5d5d292c2ad2bc6a02e5ca2cb1bb9c15e941ab02f004a06a342d7f0f/cyclopts-4.10.0-py3-none-any.whl", hash = "sha256:50f333382a60df8d40ec14aa2e627316b361c4f478598ada1f4169d959bf9ea7", size = 204097, upload-time = "2026-03-14T14:09:32.504Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -485,7 +501,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "fastmcp"
|
||||
version = "3.1.0"
|
||||
version = "3.1.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "authlib" },
|
||||
@@ -510,9 +526,9 @@ dependencies = [
|
||||
{ name = "watchfiles" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0a/70/862026c4589441f86ad3108f05bfb2f781c6b322ad60a982f40b303b47d7/fastmcp-3.1.0.tar.gz", hash = "sha256:e25264794c734b9977502a51466961eeecff92a0c2f3b49c40c070993628d6d0", size = 17347083, upload-time = "2026-03-03T02:43:11.283Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/25/83/c95d3bf717698a693eccb43e137a32939d2549876e884e246028bff6ecce/fastmcp-3.1.1.tar.gz", hash = "sha256:db184b5391a31199323766a3abf3a8bfbb8010479f77eca84c0e554f18655c48", size = 17347644, upload-time = "2026-03-14T19:12:20.235Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/17/07/516f5b20d88932e5a466c2216b628e5358a71b3a9f522215607c3281de05/fastmcp-3.1.0-py3-none-any.whl", hash = "sha256:b1f73b56fd3b0cb2bd9e2a144fc650d5cc31587ed129d996db7710e464ae8010", size = 633749, upload-time = "2026-03-03T02:43:09.06Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/ea/570122de7e24f72138d006f799768e14cc1ccf7fcb22b7750b2bd276c711/fastmcp-3.1.1-py3-none-any.whl", hash = "sha256:8132ba069d89f14566b3266919d6d72e2ec23dd45d8944622dca407e9beda7eb", size = 633754, upload-time = "2026-03-14T19:12:22.736Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1556,7 +1572,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "unraid-mcp"
|
||||
version = "0.6.0"
|
||||
version = "1.1.2"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "fastapi" },
|
||||
@@ -1585,7 +1601,7 @@ dev = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "fastapi", specifier = ">=0.115.0" },
|
||||
{ name = "fastmcp", specifier = ">=2.14.5" },
|
||||
{ name = "fastmcp", specifier = ">=3.0.0" },
|
||||
{ name = "httpx", specifier = ">=0.28.1" },
|
||||
{ name = "python-dotenv", specifier = ">=1.1.1" },
|
||||
{ name = "rich", specifier = ">=14.1.0" },
|
||||
@@ -1618,15 +1634,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "uvicorn"
|
||||
version = "0.41.0"
|
||||
version = "0.42.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "click" },
|
||||
{ name = "h11" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e3/ad/4a96c425be6fb67e0621e62d86c402b4a17ab2be7f7c055d9bd2f638b9e2/uvicorn-0.42.0.tar.gz", hash = "sha256:9b1f190ce15a2dd22e7758651d9b6d12df09a13d51ba5bf4fc33c383a48e1775", size = 85393, upload-time = "2026-03-16T06:19:50.077Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/89/f8827ccff89c1586027a105e5630ff6139a64da2515e24dafe860bd9ae4d/uvicorn-0.42.0-py3-none-any.whl", hash = "sha256:96c30f5c7abe6f74ae8900a70e92b85ad6613b745d4879eb9b16ccad15645359", size = 68830, upload-time = "2026-03-16T06:19:48.325Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
|
||||
Reference in New Issue
Block a user