forked from HomeLab/unraid-mcp
Compare commits
119 Commits
main
...
refactor/c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7db878b80b | ||
|
|
3888b9cb4a | ||
|
|
cf9449a15d | ||
|
|
884319ab11 | ||
|
|
efaab031ae | ||
|
|
dab1cd6995 | ||
|
|
faf9fb9ad7 | ||
|
|
fe7b6485fd | ||
|
|
d7545869e2 | ||
|
|
cdab970c12 | ||
|
|
80d2dd39ee | ||
|
|
aa5fa3e177 | ||
|
|
a3ea468bd9 | ||
|
|
4b43c47091 | ||
|
|
c37d4b1c5a | ||
|
|
06368ce156 | ||
|
|
9249950dff | ||
|
|
16bb5a6146 | ||
|
|
83df768135 | ||
|
|
569956ade0 | ||
|
|
f5978d67ec | ||
|
|
7c99fe1527 | ||
|
|
e87a33ef1a | ||
|
|
389b88f560 | ||
|
|
94850333e8 | ||
|
|
252ec520d1 | ||
|
|
1f35c20cdf | ||
|
|
b68347bc1e | ||
|
|
6eafc16af7 | ||
|
|
ebba60c095 | ||
|
|
2b4b1f0395 | ||
|
|
d26467a4d0 | ||
|
|
76391b4d2b | ||
|
|
0d4a3fa4e2 | ||
|
|
3a72f6c6b9 | ||
|
|
675a466d02 | ||
|
|
5a3e8e285b | ||
|
|
181ad53414 | ||
|
|
72ccf9b074 | ||
|
|
5740f4848a | ||
|
|
4ce3edd423 | ||
|
|
2c0f4a1730 | ||
|
|
a3754e37c3 | ||
|
|
c4f1b2eb00 | ||
|
|
c80ab0ca6b | ||
|
|
08afdcc50e | ||
|
|
ba7b8dfaa6 | ||
|
|
23e70e46d0 | ||
|
|
fe66e8742c | ||
|
|
77f3d897a3 | ||
|
|
8c67145bcc | ||
|
|
9fc85ea48c | ||
|
|
d99855973a | ||
|
|
9435a8c534 | ||
|
|
81f1fe174d | ||
|
|
e930b868e4 | ||
|
|
d8ce45c0fc | ||
|
|
14e9dca8bc | ||
|
|
f0a97edbf7 | ||
|
|
85cd173449 | ||
|
|
e1c80cf1da | ||
|
|
ba14a8d341 | ||
|
|
cec254b432 | ||
|
|
dec80832ea | ||
|
|
4b4c8ddf63 | ||
|
|
dfcaa37614 | ||
|
|
060acab239 | ||
|
|
be186dc2d7 | ||
|
|
13f85bd499 | ||
|
|
49264550b1 | ||
|
|
9be46750b8 | ||
|
|
61604b313f | ||
|
|
8a986a84c2 | ||
|
|
02e61b4290 | ||
|
|
e73f791fd3 | ||
|
|
7458409147 | ||
|
|
42bfcc1998 | ||
|
|
a7988e1eae | ||
|
|
520d92af57 | ||
|
|
1952720ef9 | ||
|
|
ea839ec09c | ||
|
|
b734eff902 | ||
|
|
3f13cf89c8 | ||
|
|
af3b5818dc | ||
|
|
d47101f8f7 | ||
|
|
d0cc99711a | ||
|
|
91bce1dbd5 | ||
|
|
7bb9d93bd5 | ||
|
|
5a0b99d138 | ||
|
|
a07dbd2294 | ||
|
|
85d52094ea | ||
|
|
7ef21b8051 | ||
|
|
a2c9cbfbeb | ||
|
|
3ae85e1df7 | ||
|
|
8eab5992ba | ||
|
|
4ed78b4867 | ||
|
|
a5ed5aab5f | ||
|
|
e24ef5e85d | ||
|
|
6fe6e30f1e | ||
|
|
37b6150aed | ||
|
|
482da4485d | ||
|
|
0e4365bd4b | ||
|
|
9026faaa7c | ||
|
|
ac745bec42 | ||
|
|
d76bfb889d | ||
|
|
c913e6bce9 | ||
|
|
0283006374 | ||
|
|
cd33ee2dda | ||
|
|
9aee3a2448 | ||
|
|
4af1e74b4a | ||
|
|
ac5639301c | ||
|
|
bdb2155366 | ||
|
|
60defc35ca | ||
|
|
06f18f32fc | ||
|
|
2a5b19c42f | ||
|
|
1751bc2984 | ||
|
|
348f4149a5 | ||
|
|
f76e676fd4 | ||
|
|
316193c04b |
@@ -31,32 +31,34 @@ This directory contains the Claude Code marketplace configuration for the Unraid
|
||||
Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring.
|
||||
|
||||
**Features:**
|
||||
- 10 tools with 76 actions (queries and mutations)
|
||||
- Real-time system metrics
|
||||
- 1 consolidated `unraid` tool with ~108 actions across 15 domains
|
||||
- Real-time live subscriptions (CPU, memory, logs, array state, UPS)
|
||||
- Disk health and temperature monitoring
|
||||
- Docker container management
|
||||
- VM status and control
|
||||
- Log file access
|
||||
- Network share information
|
||||
- Notification management
|
||||
- Plugin, rclone, API key, and OIDC management
|
||||
|
||||
**Version:** 0.2.0
|
||||
**Version:** 1.0.0
|
||||
**Category:** Infrastructure
|
||||
**Tags:** unraid, monitoring, homelab, graphql, docker, virtualization
|
||||
|
||||
## Configuration
|
||||
|
||||
After installation, configure your Unraid server credentials:
|
||||
After installation, run setup to configure credentials interactively:
|
||||
|
||||
```bash
|
||||
export UNRAID_API_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key"
|
||||
```python
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
Credentials are stored at `~/.unraid-mcp/.env` automatically.
|
||||
|
||||
**Getting an API Key:**
|
||||
1. Open Unraid WebUI
|
||||
2. Go to Settings → Management Access → API Keys
|
||||
3. Click "Create" and select "Viewer" role
|
||||
3. Click "Create" and select "Viewer" role (or appropriate roles for mutations)
|
||||
4. Copy the generated API key
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
"email": "jmagar@users.noreply.github.com"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Comprehensive Unraid server management and monitoring tools via GraphQL API",
|
||||
"version": "0.2.0",
|
||||
"description": "Comprehensive Unraid server management and monitoring via a single consolidated MCP tool (~108 actions across 15 domains)",
|
||||
"version": "1.0.0",
|
||||
"homepage": "https://github.com/jmagar/unraid-mcp",
|
||||
"repository": "https://github.com/jmagar/unraid-mcp"
|
||||
},
|
||||
@@ -14,8 +14,8 @@
|
||||
{
|
||||
"name": "unraid",
|
||||
"source": "./",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring",
|
||||
"version": "0.2.0",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API — single `unraid` tool with action+subaction routing for array, disk, docker, VM, notifications, live metrics, and more",
|
||||
"version": "1.0.0",
|
||||
"tags": ["unraid", "monitoring", "homelab", "graphql", "docker", "virtualization"],
|
||||
"category": "infrastructure"
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "unraid",
|
||||
"description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring",
|
||||
"version": "0.2.0",
|
||||
"version": "1.0.1",
|
||||
"author": {
|
||||
"name": "jmagar",
|
||||
"email": "jmagar@users.noreply.github.com"
|
||||
|
||||
@@ -21,3 +21,11 @@ venv/
|
||||
env/
|
||||
.vscode/
|
||||
cline_docs/
|
||||
tests/
|
||||
docs/
|
||||
scripts/
|
||||
commands/
|
||||
.full-review/
|
||||
.claude-plugin/
|
||||
*.md
|
||||
!README.md
|
||||
|
||||
15
.gitignore
vendored
15
.gitignore
vendored
@@ -10,6 +10,9 @@ wheels/
|
||||
# Tool artifacts (pytest, ruff, ty, coverage all write here)
|
||||
.cache/
|
||||
|
||||
# Hypothesis example database (machine-local, auto-regenerated)
|
||||
.hypothesis/
|
||||
|
||||
# Legacy artifact locations (in case tools run outside pyproject config)
|
||||
.pytest_cache/
|
||||
.ruff_cache/
|
||||
@@ -34,6 +37,13 @@ logs/
|
||||
# IDE/Editor
|
||||
.bivvy
|
||||
.cursor
|
||||
.windsurf/
|
||||
.1code/
|
||||
.emdash.json
|
||||
|
||||
# Backup files
|
||||
*.bak
|
||||
*.bak-*
|
||||
|
||||
# Claude Code user settings (gitignore local settings)
|
||||
.claude/settings.local.json
|
||||
@@ -41,12 +51,17 @@ logs/
|
||||
# Serena IDE configuration
|
||||
.serena/
|
||||
|
||||
# Claude Code worktrees (temporary agent isolation dirs)
|
||||
.claude/worktrees/
|
||||
|
||||
# Documentation and session artifacts
|
||||
.docs/
|
||||
.full-review/
|
||||
/docs/plans/
|
||||
/docs/sessions/
|
||||
/docs/reports/
|
||||
/docs/research/
|
||||
/docs/superpowers/
|
||||
|
||||
# Test planning documents
|
||||
/DESTRUCTIVE_ACTIONS.md
|
||||
|
||||
130
CLAUDE.md
130
CLAUDE.md
@@ -83,25 +83,54 @@ docker compose down
|
||||
- **Data Processing**: Tools return both human-readable summaries and detailed raw data
|
||||
- **Health Monitoring**: Comprehensive health check tool for system monitoring
|
||||
- **Real-time Subscriptions**: WebSocket-based live data streaming
|
||||
- **Persistent Subscription Manager**: `live` action subactions use a shared `SubscriptionManager`
|
||||
that maintains persistent WebSocket connections. Resources serve cached data via
|
||||
`subscription_manager.get_resource_data(action)`. A "connecting" placeholder is returned
|
||||
while the subscription starts — callers should retry in a moment. When
|
||||
`UNRAID_AUTO_START_SUBSCRIPTIONS=false`, resources fall back to on-demand `subscribe_once`.
|
||||
|
||||
### Tool Categories (10 Tools, 76 Actions)
|
||||
1. **`unraid_info`** (19 actions): overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config
|
||||
2. **`unraid_array`** (5 actions): parity_start, parity_pause, parity_resume, parity_cancel, parity_status
|
||||
3. **`unraid_storage`** (6 actions): shares, disks, disk_details, unassigned, log_files, logs
|
||||
4. **`unraid_docker`** (15 actions): list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates
|
||||
5. **`unraid_vm`** (9 actions): list, details, start, stop, pause, resume, force_stop, reboot, reset
|
||||
6. **`unraid_notifications`** (9 actions): overview, list, warnings, create, archive, unread, delete, delete_archived, archive_all
|
||||
7. **`unraid_rclone`** (4 actions): list_remotes, config_form, create_remote, delete_remote
|
||||
8. **`unraid_users`** (1 action): me
|
||||
9. **`unraid_keys`** (5 actions): list, get, create, update, delete
|
||||
10. **`unraid_health`** (3 actions): check, test_connection, diagnose
|
||||
### Tool Categories (1 Tool, ~107 Subactions)
|
||||
|
||||
The server registers a **single consolidated `unraid` tool** with `action` (domain) + `subaction` (operation) routing. Call it as `unraid(action="docker", subaction="list")`.
|
||||
|
||||
| action | subactions |
|
||||
|--------|-----------|
|
||||
| **system** (19) | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config |
|
||||
| **health** (4) | check, test_connection, diagnose, setup |
|
||||
| **array** (13) | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array*, add_disk, remove_disk*, mount_disk, unmount_disk, clear_disk_stats* |
|
||||
| **disk** (6) | shares, disks, disk_details, log_files, logs, flash_backup* |
|
||||
| **docker** (7) | list, details, start, stop, restart, networks, network_details |
|
||||
| **vm** (9) | list, details, start, stop, pause, resume, force_stop*, reboot, reset* |
|
||||
| **notification** (12) | overview, list, create, archive, mark_unread, recalculate, archive_all, archive_many, unarchive_many, unarchive_all, delete*, delete_archived* |
|
||||
| **key** (7) | list, get, create, update, delete*, add_role, remove_role |
|
||||
| **plugin** (3) | list, add, remove* |
|
||||
| **rclone** (4) | list_remotes, config_form, create_remote, delete_remote* |
|
||||
| **setting** (2) | update, configure_ups* |
|
||||
| **customization** (5) | theme, public_theme, is_initial_setup, sso_enabled, set_theme |
|
||||
| **oidc** (5) | providers, provider, configuration, public_providers, validate_session |
|
||||
| **user** (1) | me |
|
||||
| **live** (11) | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, notification_feed, log_tail, owner, server_status |
|
||||
|
||||
`*` = destructive, requires `confirm=True`
|
||||
|
||||
### Destructive Actions (require `confirm=True`)
|
||||
- **array**: stop_array, remove_disk, clear_disk_stats
|
||||
- **vm**: force_stop, reset
|
||||
- **notifications**: delete, delete_archived
|
||||
- **rclone**: delete_remote
|
||||
- **keys**: delete
|
||||
- **disk**: flash_backup
|
||||
- **settings**: configure_ups
|
||||
- **plugins**: remove
|
||||
|
||||
### Environment Variable Hierarchy
|
||||
The server loads environment variables from multiple locations in order:
|
||||
1. `/app/.env.local` (container mount)
|
||||
2. `../.env.local` (project root)
|
||||
3. `../.env` (project root)
|
||||
4. `.env` (local directory)
|
||||
1. `~/.unraid-mcp/.env` (primary — canonical credentials dir, all runtimes)
|
||||
2. `~/.unraid-mcp/.env.local` (local overrides, only used if primary is absent)
|
||||
3. `/app/.env.local` (Docker container mount)
|
||||
4. `../.env.local` (project root local overrides)
|
||||
5. `../.env` (project root fallback)
|
||||
6. `unraid_mcp/.env` (last resort)
|
||||
|
||||
### Transport Configuration
|
||||
- **streamable-http** (recommended): HTTP-based transport on `/mcp` endpoint
|
||||
@@ -119,3 +148,74 @@ The server loads environment variables from multiple locations in order:
|
||||
- Selective queries to avoid GraphQL type overflow issues
|
||||
- Optional caching controls for Docker container queries
|
||||
- Log file overwrite at 10MB cap to prevent disk space issues
|
||||
|
||||
## Critical Gotchas
|
||||
|
||||
### Mutation Handler Ordering
|
||||
**Mutation handlers MUST return before the domain query dict lookup.** Mutations are not in the domain `_*_QUERIES` dicts (e.g., `_DOCKER_QUERIES`, `_ARRAY_QUERIES`) — reaching that line for a mutation subaction causes a `KeyError`. Always add early-return `if subaction == "mutation_name": ... return` blocks BEFORE the queries lookup.
|
||||
|
||||
### Test Patching
|
||||
- Patch at the **tool module level**: `unraid_mcp.tools.unraid.make_graphql_request` (not core)
|
||||
- `conftest.py`'s `mock_graphql_request` patches the core module — wrong for tool-level tests
|
||||
- Use `conftest.py`'s `make_tool_fn()` helper or local `_make_tool()` pattern
|
||||
|
||||
### Test Suite Structure
|
||||
```
|
||||
tests/
|
||||
├── conftest.py # Shared fixtures + make_tool_fn() helper
|
||||
├── test_*.py # Unit tests (mock at tool module level)
|
||||
├── http_layer/ # httpx-level request/response tests (respx)
|
||||
├── integration/ # WebSocket subscription lifecycle tests (slow)
|
||||
├── safety/ # Destructive action guard tests
|
||||
└── schema/ # GraphQL query validation (99 tests, all passing)
|
||||
```
|
||||
|
||||
### Running Targeted Tests
|
||||
```bash
|
||||
uv run pytest tests/safety/ # Destructive action guards only
|
||||
uv run pytest tests/schema/ # GraphQL query validation only
|
||||
uv run pytest tests/http_layer/ # HTTP/httpx layer only
|
||||
uv run pytest tests/test_docker.py # Single tool only
|
||||
uv run pytest -x # Fail fast on first error
|
||||
```
|
||||
|
||||
### Scripts
|
||||
```bash
|
||||
# HTTP smoke-test against a live server (11 tools, all non-destructive actions)
|
||||
./tests/mcporter/test-actions.sh [MCP_URL] # default: http://localhost:6970/mcp
|
||||
|
||||
# stdio smoke-test, no running server needed (good for CI)
|
||||
./tests/mcporter/test-tools.sh [--parallel] [--timeout-ms N] [--verbose]
|
||||
|
||||
# Destructive action smoke-test (confirms guard blocks without confirm=True)
|
||||
./tests/mcporter/test-destructive.sh [MCP_URL]
|
||||
```
|
||||
See `tests/mcporter/README.md` for transport differences and `docs/DESTRUCTIVE_ACTIONS.md` for exact destructive-action test commands.
|
||||
|
||||
### API Reference Docs
|
||||
- `docs/UNRAID_API_COMPLETE_REFERENCE.md` — Full GraphQL schema reference
|
||||
- `docs/UNRAID_API_OPERATIONS.md` — All supported operations with examples
|
||||
|
||||
Use these when adding new queries/mutations.
|
||||
|
||||
### Version Bumps
|
||||
When bumping the version, **always update both files** — they must stay in sync:
|
||||
- `pyproject.toml` → `version = "X.Y.Z"` under `[project]`
|
||||
- `.claude-plugin/plugin.json` → `"version": "X.Y.Z"`
|
||||
|
||||
### Credential Storage (`~/.unraid-mcp/.env`)
|
||||
All runtimes (plugin, direct, Docker) load credentials from `~/.unraid-mcp/.env`.
|
||||
- **Plugin/direct:** `unraid action=health subaction=setup` writes this file automatically via elicitation,
|
||||
**Safe to re-run**: always prompts for confirmation before overwriting existing credentials,
|
||||
whether the connection is working or not (failed probe may be a transient outage, not bad creds).
|
||||
or manual: `mkdir -p ~/.unraid-mcp && cp .env.example ~/.unraid-mcp/.env` then edit.
|
||||
- **Docker:** `docker-compose.yml` loads it via `env_file` before container start.
|
||||
- **No symlinks needed.** Version bumps do not affect this path.
|
||||
- **Permissions:** dir=700, file=600 (set automatically by elicitation; set manually if
|
||||
using `cp`: `chmod 700 ~/.unraid-mcp && chmod 600 ~/.unraid-mcp/.env`).
|
||||
|
||||
### Symlinks
|
||||
`AGENTS.md` and `GEMINI.md` are symlinks to `CLAUDE.md` for Codex/Gemini compatibility:
|
||||
```bash
|
||||
ln -sf CLAUDE.md AGENTS.md && ln -sf CLAUDE.md GEMINI.md
|
||||
```
|
||||
|
||||
31
Dockerfile
31
Dockerfile
@@ -1,19 +1,28 @@
|
||||
# Use an official Python runtime as a parent image
|
||||
FROM python:3.11-slim
|
||||
FROM python:3.12-slim
|
||||
|
||||
# Set the working directory in the container
|
||||
WORKDIR /app
|
||||
|
||||
# Install uv
|
||||
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/
|
||||
# Install uv (pinned tag to avoid mutable latest)
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.9.25 /uv /uvx /usr/local/bin/
|
||||
|
||||
# Copy dependency files
|
||||
COPY pyproject.toml .
|
||||
COPY uv.lock .
|
||||
COPY README.md .
|
||||
# Create non-root user with home directory and give ownership of /app
|
||||
RUN groupadd --gid 1000 appuser && \
|
||||
useradd --uid 1000 --gid 1000 --create-home --shell /bin/false appuser && \
|
||||
chown appuser:appuser /app
|
||||
|
||||
# Copy dependency files (owned by appuser via --chown)
|
||||
COPY --chown=appuser:appuser pyproject.toml .
|
||||
COPY --chown=appuser:appuser uv.lock .
|
||||
COPY --chown=appuser:appuser README.md .
|
||||
COPY --chown=appuser:appuser LICENSE .
|
||||
|
||||
# Copy the source code
|
||||
COPY unraid_mcp/ ./unraid_mcp/
|
||||
COPY --chown=appuser:appuser unraid_mcp/ ./unraid_mcp/
|
||||
|
||||
# Switch to non-root user before installing dependencies
|
||||
USER appuser
|
||||
|
||||
# Install dependencies and the package
|
||||
RUN uv sync --frozen
|
||||
@@ -31,5 +40,9 @@ ENV UNRAID_API_KEY=""
|
||||
ENV UNRAID_VERIFY_SSL="true"
|
||||
ENV UNRAID_MCP_LOG_LEVEL="INFO"
|
||||
|
||||
# Run unraid-mcp-server.py when the container launches
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
||||
CMD ["python", "-c", "import os, urllib.request; port = os.getenv('UNRAID_MCP_PORT', '6970'); urllib.request.urlopen(f'http://localhost:{port}/mcp')"]
|
||||
|
||||
# Run unraid-mcp-server when the container launches
|
||||
CMD ["uv", "run", "unraid-mcp-server"]
|
||||
|
||||
234
README.md
234
README.md
@@ -1,17 +1,17 @@
|
||||
# 🚀 Unraid MCP Server
|
||||
|
||||
[](https://www.python.org/downloads/)
|
||||
[](https://github.com/jlowin/fastmcp)
|
||||
[](https://github.com/jlowin/fastmcp)
|
||||
[](LICENSE)
|
||||
|
||||
**A powerful MCP (Model Context Protocol) server that provides comprehensive tools to interact with an Unraid server's GraphQL API.**
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- 🔧 **10 Tools, 90 Actions**: Complete Unraid management through MCP protocol
|
||||
- 🏗️ **Modular Architecture**: Clean, maintainable, and extensible codebase
|
||||
- 🔧 **1 Tool, ~108 Actions**: Complete Unraid management through a single consolidated MCP tool
|
||||
- 🏗️ **Modular Architecture**: Clean, maintainable, and extensible codebase
|
||||
- ⚡ **High Performance**: Async/concurrent operations with optimized timeouts
|
||||
- 🔄 **Real-time Data**: WebSocket subscriptions for live log streaming
|
||||
- 🔄 **Real-time Data**: WebSocket subscriptions for live metrics, logs, array state, and more
|
||||
- 📊 **Health Monitoring**: Comprehensive system diagnostics and status
|
||||
- 🐳 **Docker Ready**: Full containerization support with Docker Compose
|
||||
- 🔒 **Secure**: Proper SSL/TLS configuration and API key management
|
||||
@@ -26,7 +26,6 @@
|
||||
- [Installation](#-installation)
|
||||
- [Configuration](#-configuration)
|
||||
- [Available Tools & Resources](#-available-tools--resources)
|
||||
- [Custom Slash Commands](#-custom-slash-commands)
|
||||
- [Development](#-development)
|
||||
- [Architecture](#-architecture)
|
||||
- [Troubleshooting](#-troubleshooting)
|
||||
@@ -46,14 +45,39 @@
|
||||
```
|
||||
|
||||
This provides instant access to Unraid monitoring and management through Claude Code with:
|
||||
- **10 MCP tools** exposing **83 actions** via the consolidated action pattern
|
||||
- **10 slash commands** for quick CLI-style access (`commands/`)
|
||||
- **1 MCP tool** (`unraid`) exposing **~108 actions** via `action` + `subaction` routing
|
||||
- Real-time system metrics and health monitoring
|
||||
- Docker container and VM lifecycle management
|
||||
- Disk health monitoring and storage management
|
||||
|
||||
**See [.claude-plugin/README.md](.claude-plugin/README.md) for detailed plugin documentation.**
|
||||
|
||||
### ⚙️ Credential Setup
|
||||
|
||||
Credentials are stored in `~/.unraid-mcp/.env` — one location that works for the
|
||||
Claude Code plugin, direct `uv run` invocations, and Docker.
|
||||
|
||||
**Option 1 — Interactive (Claude Code plugin, elicitation-supported clients):**
|
||||
```
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
The server prompts for your API URL and key, writes `~/.unraid-mcp/.env` automatically
|
||||
(created with mode 700/600), and activates credentials without restart.
|
||||
|
||||
**Option 2 — Manual:**
|
||||
```bash
|
||||
mkdir -p ~/.unraid-mcp && chmod 700 ~/.unraid-mcp
|
||||
cp .env.example ~/.unraid-mcp/.env && chmod 600 ~/.unraid-mcp/.env
|
||||
# Edit ~/.unraid-mcp/.env with your values:
|
||||
# UNRAID_API_URL=https://10-1-0-2.xxx.myunraid.net:31337
|
||||
# UNRAID_API_KEY=your-key-from-unraid-settings
|
||||
```
|
||||
|
||||
**Docker:** `~/.unraid-mcp/.env` is loaded via `env_file` in `docker-compose.yml` —
|
||||
same file, no duplication needed.
|
||||
|
||||
> **Finding your API key:** Unraid → Settings → Management Access → API Keys
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Quick Start
|
||||
@@ -71,8 +95,13 @@ cd unraid-mcp
|
||||
|
||||
### 2. Configure Environment
|
||||
```bash
|
||||
# For Docker/production use — canonical credential location (all runtimes)
|
||||
mkdir -p ~/.unraid-mcp && chmod 700 ~/.unraid-mcp
|
||||
cp .env.example ~/.unraid-mcp/.env && chmod 600 ~/.unraid-mcp/.env
|
||||
# Edit ~/.unraid-mcp/.env with your values
|
||||
|
||||
# For local development only
|
||||
cp .env.example .env
|
||||
# Edit .env with your Unraid API details
|
||||
```
|
||||
|
||||
### 3. Deploy with Docker (Recommended)
|
||||
@@ -104,15 +133,13 @@ unraid-mcp/ # ${CLAUDE_PLUGIN_ROOT}
|
||||
├── .claude-plugin/
|
||||
│ ├── marketplace.json # Marketplace catalog
|
||||
│ └── plugin.json # Plugin manifest
|
||||
├── commands/ # 10 custom slash commands
|
||||
├── unraid_mcp/ # MCP server Python package
|
||||
├── skills/unraid/ # Skill and documentation
|
||||
├── pyproject.toml # Dependencies and entry points
|
||||
└── scripts/ # Validation and helper scripts
|
||||
```
|
||||
|
||||
- **MCP Server**: 10 tools with 76 actions via GraphQL API
|
||||
- **Slash Commands**: 10 commands in `commands/` for quick CLI-style access
|
||||
- **MCP Server**: 1 `unraid` tool with ~108 actions via GraphQL API
|
||||
- **Skill**: `/unraid` skill for monitoring and queries
|
||||
- **Entry Point**: `unraid-mcp-server` defined in pyproject.toml
|
||||
|
||||
@@ -197,11 +224,15 @@ UNRAID_MCP_PORT=6970
|
||||
UNRAID_MCP_LOG_LEVEL=INFO # DEBUG, INFO, WARNING, ERROR
|
||||
UNRAID_MCP_LOG_FILE=unraid-mcp.log
|
||||
|
||||
# SSL/TLS Configuration
|
||||
# SSL/TLS Configuration
|
||||
UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle
|
||||
|
||||
# Subscription Configuration
|
||||
UNRAID_AUTO_START_SUBSCRIPTIONS=true # Auto-start WebSocket subscriptions on startup (default: true)
|
||||
UNRAID_MAX_RECONNECT_ATTEMPTS=5 # Max WebSocket reconnection attempts (default: 5)
|
||||
|
||||
# Optional: Log Stream Configuration
|
||||
# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog # Path for log streaming resource
|
||||
# UNRAID_AUTOSTART_LOG_PATH=/var/log/syslog # Path for log streaming resource (unraid://logs/stream)
|
||||
```
|
||||
|
||||
### Transport Options
|
||||
@@ -216,85 +247,60 @@ UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle
|
||||
|
||||
## 🛠️ Available Tools & Resources
|
||||
|
||||
Each tool uses a consolidated `action` parameter to expose multiple operations, reducing context window usage. Destructive actions require `confirm=True`.
|
||||
The single `unraid` tool uses `action` (domain) + `subaction` (operation) routing to expose all operations via one MCP tool, minimizing context window usage. Destructive actions require `confirm=True`.
|
||||
|
||||
### Tool Categories (10 Tools, 76 Actions)
|
||||
### Single Tool, 15 Domains, ~108 Actions
|
||||
|
||||
| Tool | Actions | Description |
|
||||
|------|---------|-------------|
|
||||
| **`unraid_info`** | 19 | overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config |
|
||||
| **`unraid_array`** | 5 | parity_start, parity_pause, parity_resume, parity_cancel, parity_status |
|
||||
| **`unraid_storage`** | 6 | shares, disks, disk_details, unassigned, log_files, logs |
|
||||
| **`unraid_docker`** | 15 | list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates |
|
||||
| **`unraid_vm`** | 9 | list, details, start, stop, pause, resume, force_stop, reboot, reset |
|
||||
| **`unraid_notifications`** | 9 | overview, list, warnings, create, archive, unread, delete, delete_archived, archive_all |
|
||||
| **`unraid_rclone`** | 4 | list_remotes, config_form, create_remote, delete_remote |
|
||||
| **`unraid_users`** | 1 | me |
|
||||
| **`unraid_keys`** | 5 | list, get, create, update, delete |
|
||||
| **`unraid_health`** | 3 | check, test_connection, diagnose |
|
||||
Call pattern: `unraid(action="<domain>", subaction="<operation>")`
|
||||
|
||||
### MCP Resources (Real-time Data)
|
||||
- `unraid://logs/stream` - Live log streaming from `/var/log/syslog` with WebSocket subscriptions
|
||||
| action= | Subactions | Description |
|
||||
|---------|-----------|-------------|
|
||||
| **`system`** | overview, array, network, registration, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config | Server info, metrics, network, UPS (18 subactions) |
|
||||
| **`health`** | check, test_connection, diagnose, setup | Health checks, connection test, diagnostics, interactive setup (4 subactions) |
|
||||
| **`array`** | parity_status, parity_history, parity_start, parity_pause, parity_resume, parity_cancel, start_array, stop_array, add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats | Parity checks, array state, disk operations (13 subactions) |
|
||||
| **`disk`** | shares, disks, disk_details, log_files, logs, flash_backup | Shares, physical disks, log files (6 subactions) |
|
||||
| **`docker`** | list, details, start, stop, restart, networks, network_details | Container lifecycle and network inspection (7 subactions) |
|
||||
| **`vm`** | list, details, start, stop, pause, resume, force_stop, reboot, reset | Virtual machine lifecycle (9 subactions) |
|
||||
| **`notification`** | overview, list, create, archive, mark_unread, delete, delete_archived, archive_all, archive_many, unarchive_many, unarchive_all, recalculate | System notifications CRUD (12 subactions) |
|
||||
| **`key`** | list, get, create, update, delete, add_role, remove_role | API key management (7 subactions) |
|
||||
| **`plugin`** | list, add, remove | Plugin management (3 subactions) |
|
||||
| **`rclone`** | list_remotes, config_form, create_remote, delete_remote | Cloud storage remote management (4 subactions) |
|
||||
| **`setting`** | update, configure_ups | System settings and UPS config (2 subactions) |
|
||||
| **`customization`** | theme, public_theme, is_initial_setup, sso_enabled, set_theme | Theme and UI customization (5 subactions) |
|
||||
| **`oidc`** | providers, provider, configuration, public_providers, validate_session | OIDC/SSO provider management (5 subactions) |
|
||||
| **`user`** | me | Current authenticated user (1 subaction) |
|
||||
| **`live`** | cpu, memory, cpu_telemetry, array_state, parity_progress, ups_status, notifications_overview, owner, server_status, log_tail, notification_feed | Real-time WebSocket subscription snapshots (11 subactions) |
|
||||
|
||||
> **Note**: MCP Resources provide real-time data streams that can be accessed via MCP clients. The log stream resource automatically connects to your Unraid system logs and provides live updates.
|
||||
### Destructive Actions (require `confirm=True`)
|
||||
- **array**: `stop_array`, `remove_disk`, `clear_disk_stats`
|
||||
- **vm**: `force_stop`, `reset`
|
||||
- **notification**: `delete`, `delete_archived`
|
||||
- **rclone**: `delete_remote`
|
||||
- **key**: `delete`
|
||||
- **disk**: `flash_backup`
|
||||
- **setting**: `configure_ups`
|
||||
- **plugin**: `remove`
|
||||
|
||||
---
|
||||
### MCP Resources (Real-time Cached Data)
|
||||
|
||||
## 💬 Custom Slash Commands
|
||||
The server exposes two classes of MCP resources backed by persistent WebSocket connections:
|
||||
|
||||
The project includes **10 custom slash commands** in `commands/` for quick access to Unraid operations:
|
||||
**`unraid://live/*` — 9 snapshot resources** (auto-started, always-cached):
|
||||
- `unraid://live/cpu` — CPU utilization
|
||||
- `unraid://live/memory` — Memory usage
|
||||
- `unraid://live/cpu_telemetry` — Detailed CPU telemetry
|
||||
- `unraid://live/array_state` — Array state changes
|
||||
- `unraid://live/parity_progress` — Parity check progress
|
||||
- `unraid://live/ups_status` — UPS status
|
||||
- `unraid://live/notifications_overview` — Notification counts
|
||||
- `unraid://live/owner` — Owner info changes
|
||||
- `unraid://live/server_status` — Server status changes
|
||||
|
||||
### Available Commands
|
||||
**`unraid://logs/stream`** — Live log file tail (path controlled by `UNRAID_AUTOSTART_LOG_PATH`)
|
||||
|
||||
| Command | Actions | Quick Access |
|
||||
|---------|---------|--------------|
|
||||
| `/info` | 19 | System information, metrics, configuration |
|
||||
| `/array` | 5 | Parity check management |
|
||||
| `/storage` | 6 | Shares, disks, logs |
|
||||
| `/docker` | 15 | Container management and monitoring |
|
||||
| `/vm` | 9 | Virtual machine lifecycle |
|
||||
| `/notifications` | 9 | Alert management |
|
||||
| `/rclone` | 4 | Cloud storage remotes |
|
||||
| `/users` | 1 | Current user query |
|
||||
| `/keys` | 5 | API key management |
|
||||
| `/health` | 3 | System health checks |
|
||||
> **Note**: Resources return cached data from persistent WebSocket subscriptions. A `{"status": "connecting"}` placeholder is returned while the subscription initializes — retry in a moment.
|
||||
|
||||
### Example Usage
|
||||
|
||||
```bash
|
||||
# System monitoring
|
||||
/info overview
|
||||
/health check
|
||||
/storage shares
|
||||
|
||||
# Container management
|
||||
/docker list
|
||||
/docker start plex
|
||||
/docker logs nginx
|
||||
|
||||
# VM operations
|
||||
/vm list
|
||||
/vm start windows-10
|
||||
|
||||
# Notifications
|
||||
/notifications warnings
|
||||
/notifications archive_all
|
||||
|
||||
# User management
|
||||
/users list
|
||||
/keys create "Automation Key" "For CI/CD"
|
||||
```
|
||||
|
||||
### Command Features
|
||||
|
||||
Each slash command provides:
|
||||
- **Comprehensive documentation** of all available actions
|
||||
- **Argument hints** for required parameters
|
||||
- **Safety warnings** for destructive operations (⚠️)
|
||||
- **Usage examples** for common scenarios
|
||||
- **Action categorization** (Query, Lifecycle, Management, Destructive)
|
||||
|
||||
Run any command without arguments to see full documentation, or type `/help` to list all available commands.
|
||||
> **`log_tail` and `notification_feed`** are accessible as tool subactions (`unraid(action="live", subaction="log_tail")`) but are not registered as MCP resources — they use transient one-shot subscriptions and require parameters.
|
||||
|
||||
---
|
||||
|
||||
@@ -306,31 +312,43 @@ Run any command without arguments to see full documentation, or type `/help` to
|
||||
unraid-mcp/
|
||||
├── unraid_mcp/ # Main package
|
||||
│ ├── main.py # Entry point
|
||||
│ ├── server.py # FastMCP server setup
|
||||
│ ├── version.py # Version management (importlib.metadata)
|
||||
│ ├── config/ # Configuration management
|
||||
│ │ ├── settings.py # Environment & settings
|
||||
│ │ └── logging.py # Logging setup
|
||||
│ ├── core/ # Core infrastructure
|
||||
│ ├── core/ # Core infrastructure
|
||||
│ │ ├── client.py # GraphQL client
|
||||
│ │ ├── exceptions.py # Custom exceptions
|
||||
│ │ └── types.py # Shared data types
|
||||
│ │ ├── guards.py # Destructive action guards
|
||||
│ │ ├── setup.py # Interactive credential setup
|
||||
│ │ ├── types.py # Shared data types
|
||||
│ │ └── utils.py # Utility functions
|
||||
│ ├── subscriptions/ # Real-time subscriptions
|
||||
│ │ ├── manager.py # WebSocket management
|
||||
│ │ ├── resources.py # MCP resources
|
||||
│ │ └── diagnostics.py # Diagnostic tools
|
||||
│ ├── tools/ # MCP tool categories (10 tools, 76 actions)
|
||||
│ │ ├── info.py # System information (19 actions)
|
||||
│ │ ├── array.py # Parity checks (5 actions)
|
||||
│ │ ├── storage.py # Storage & monitoring (6 actions)
|
||||
│ │ ├── docker.py # Container management (15 actions)
|
||||
│ │ ├── virtualization.py # VM management (9 actions)
|
||||
│ │ ├── notifications.py # Notification management (9 actions)
|
||||
│ │ ├── rclone.py # Cloud storage (4 actions)
|
||||
│ │ ├── users.py # Current user query (1 action)
|
||||
│ │ ├── keys.py # API key management (5 actions)
|
||||
│ │ └── health.py # Health checks (3 actions)
|
||||
│ └── server.py # FastMCP server setup
|
||||
├── logs/ # Log files (auto-created)
|
||||
└── docker-compose.yml # Docker Compose deployment
|
||||
│ │ ├── manager.py # Persistent WebSocket manager
|
||||
│ │ ├── resources.py # MCP resources (unraid://live/*)
|
||||
│ │ ├── snapshot.py # Transient subscribe_once helpers
|
||||
│ │ ├── queries.py # Subscription query constants
|
||||
│ │ ├── diagnostics.py # Diagnostic tools
|
||||
│ │ └── utils.py # Subscription utility functions
|
||||
│ └── tools/ # Single consolidated tool (~108 actions)
|
||||
│ └── unraid.py # All 15 domains in one file
|
||||
├── tests/ # Test suite
|
||||
│ ├── conftest.py # Shared fixtures
|
||||
│ ├── test_*.py # Unit tests (per domain)
|
||||
│ ├── http_layer/ # httpx-level request tests
|
||||
│ ├── integration/ # WebSocket lifecycle tests
|
||||
│ ├── safety/ # Destructive action guard tests
|
||||
│ └── schema/ # GraphQL query validation
|
||||
├── docs/ # Documentation & API references
|
||||
├── scripts/ # Build and utility scripts
|
||||
├── skills/unraid/ # Claude skill assets
|
||||
├── .claude-plugin/ # Plugin manifest & marketplace config
|
||||
├── .env.example # Environment template
|
||||
├── Dockerfile # Container image definition
|
||||
├── docker-compose.yml # Docker Compose deployment
|
||||
├── pyproject.toml # Project config & dependencies
|
||||
└── logs/ # Log files (auto-created, gitignored)
|
||||
```
|
||||
|
||||
### Code Quality Commands
|
||||
@@ -346,6 +364,20 @@ uv run ty check unraid_mcp/
|
||||
uv run pytest
|
||||
```
|
||||
|
||||
### Integration Smoke-Tests (mcporter)
|
||||
|
||||
Live integration tests that exercise all non-destructive actions via [mcporter](https://github.com/mcporter/mcporter). Two scripts cover two transport modes:
|
||||
|
||||
```bash
|
||||
# stdio — no running server needed (good for CI)
|
||||
./tests/mcporter/test-tools.sh [--parallel] [--timeout-ms N] [--verbose]
|
||||
|
||||
# HTTP — connects to a live server (most up-to-date coverage)
|
||||
./tests/mcporter/test-actions.sh [MCP_URL] # default: http://localhost:6970/mcp
|
||||
```
|
||||
|
||||
Destructive actions are always skipped in both scripts. For safe testing strategies and exact mcporter commands per destructive action, see [`docs/DESTRUCTIVE_ACTIONS.md`](docs/DESTRUCTIVE_ACTIONS.md).
|
||||
|
||||
### API Schema Docs Automation
|
||||
```bash
|
||||
# Regenerate complete GraphQL schema reference from live introspection
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid array parity checks
|
||||
argument-hint: [action] [correct=true/false]
|
||||
---
|
||||
|
||||
Execute the `unraid_array` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (5)
|
||||
|
||||
**Parity Check Operations:**
|
||||
- `parity_start` - Start parity check/sync (optional: correct=true to fix errors)
|
||||
- `parity_pause` - Pause running parity operation
|
||||
- `parity_resume` - Resume paused parity operation
|
||||
- `parity_cancel` - Cancel running parity operation
|
||||
- `parity_status` - Get current parity check status
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/array parity_start
|
||||
/array parity_start correct=true
|
||||
/array parity_pause
|
||||
/array parity_resume
|
||||
/array parity_cancel
|
||||
/array parity_status
|
||||
```
|
||||
|
||||
**Note:** Use `correct=true` with `parity_start` to automatically fix any parity errors found during the check.
|
||||
|
||||
Use the tool to execute the requested parity operation and report the results.
|
||||
@@ -1,48 +0,0 @@
|
||||
---
|
||||
description: Manage Docker containers on Unraid
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_docker` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (15)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all Docker containers with status
|
||||
- `details` - Get detailed info for a container (requires container identifier)
|
||||
- `logs` - Get container logs (requires container identifier)
|
||||
- `check_updates` - Check for available container updates
|
||||
- `port_conflicts` - Identify port conflicts
|
||||
- `networks` - List Docker networks
|
||||
- `network_details` - Get network details (requires network identifier)
|
||||
|
||||
**Container Lifecycle:**
|
||||
- `start` - Start a stopped container (requires container identifier)
|
||||
- `stop` - Stop a running container (requires container identifier)
|
||||
- `restart` - Restart a container (requires container identifier)
|
||||
- `pause` - Pause a running container (requires container identifier)
|
||||
- `unpause` - Unpause a paused container (requires container identifier)
|
||||
|
||||
**Updates & Management:**
|
||||
- `update` - Update a specific container (requires container identifier)
|
||||
- `update_all` - Update all containers with available updates
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `remove` - Permanently delete a container (requires container identifier + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-docker list
|
||||
/unraid-docker details plex
|
||||
/unraid-docker logs plex
|
||||
/unraid-docker start nginx
|
||||
/unraid-docker restart sonarr
|
||||
/unraid-docker check_updates
|
||||
/unraid-docker update plex
|
||||
/unraid-docker port_conflicts
|
||||
```
|
||||
|
||||
**Container Identification:** Use container name, ID, or partial match (fuzzy search supported)
|
||||
|
||||
Use the tool to execute the requested Docker operation and report the results.
|
||||
@@ -1,59 +0,0 @@
|
||||
---
|
||||
description: Check Unraid system health and connectivity
|
||||
argument-hint: [action]
|
||||
---
|
||||
|
||||
Execute the `unraid_health` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (3)
|
||||
|
||||
**Health Monitoring:**
|
||||
- `check` - Comprehensive health check of all system components
|
||||
- `test_connection` - Test basic API connectivity
|
||||
- `diagnose` - Detailed diagnostic information for troubleshooting
|
||||
|
||||
## What Each Action Checks
|
||||
|
||||
### `check` - System Health
|
||||
- API connectivity and response time
|
||||
- Array status and disk health
|
||||
- Running services status
|
||||
- Docker container health
|
||||
- VM status
|
||||
- System resources (CPU, RAM, disk I/O)
|
||||
- Network connectivity
|
||||
- UPS status (if configured)
|
||||
|
||||
Returns: Overall health status (`HEALTHY`, `WARNING`, `CRITICAL`) with component details
|
||||
|
||||
### `test_connection` - Connectivity
|
||||
- GraphQL endpoint availability
|
||||
- Authentication validity
|
||||
- Basic query execution
|
||||
- Network latency
|
||||
|
||||
Returns: Connection status and latency metrics
|
||||
|
||||
### `diagnose` - Diagnostic Details
|
||||
- Full system configuration
|
||||
- Resource utilization trends
|
||||
- Error logs and warnings
|
||||
- Component-level diagnostics
|
||||
- Troubleshooting recommendations
|
||||
|
||||
Returns: Detailed diagnostic report
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-health check
|
||||
/unraid-health test_connection
|
||||
/unraid-health diagnose
|
||||
```
|
||||
|
||||
**Use Cases:**
|
||||
- `check` - Quick health status (monitoring dashboards)
|
||||
- `test_connection` - Verify API access (troubleshooting)
|
||||
- `diagnose` - Deep dive debugging (issue resolution)
|
||||
|
||||
Use the tool to execute the requested health check and present results with clear severity indicators.
|
||||
@@ -1,50 +0,0 @@
|
||||
---
|
||||
description: Query Unraid server information and configuration
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_info` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (19)
|
||||
|
||||
**System Overview:**
|
||||
- `overview` - Complete system summary with all key metrics
|
||||
- `server` - Server details (hostname, version, uptime)
|
||||
- `servers` - List all known Unraid servers
|
||||
|
||||
**Array & Storage:**
|
||||
- `array` - Array status, disks, and health
|
||||
|
||||
**Network & Registration:**
|
||||
- `network` - Network configuration and interfaces
|
||||
- `registration` - Registration status and license info
|
||||
- `connect` - Connect service configuration
|
||||
- `online` - Online status check
|
||||
|
||||
**Configuration:**
|
||||
- `config` - System configuration settings
|
||||
- `settings` - User settings and preferences
|
||||
- `variables` - Environment variables
|
||||
- `display` - Display settings
|
||||
|
||||
**Services & Monitoring:**
|
||||
- `services` - Running services status
|
||||
- `metrics` - System metrics (CPU, RAM, disk I/O)
|
||||
- `ups_devices` - List all UPS devices
|
||||
- `ups_device` - Get specific UPS device details (requires device_id)
|
||||
- `ups_config` - UPS configuration
|
||||
|
||||
**Ownership:**
|
||||
- `owner` - Server owner information
|
||||
- `flash` - USB flash drive details
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-info overview
|
||||
/unraid-info array
|
||||
/unraid-info metrics
|
||||
/unraid-info ups_device [device-id]
|
||||
```
|
||||
|
||||
Use the tool to retrieve the requested information and present it in a clear, formatted manner.
|
||||
@@ -1,37 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid API keys for authentication
|
||||
argument-hint: [action] [key-id]
|
||||
---
|
||||
|
||||
Execute the `unraid_keys` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (5)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all API keys with metadata
|
||||
- `get` - Get details for a specific API key (requires key_id)
|
||||
|
||||
**Management Operations:**
|
||||
- `create` - Create a new API key (requires name, optional description and expiry)
|
||||
- `update` - Update an existing API key (requires key_id, name, description)
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `delete` - Permanently revoke an API key (requires key_id + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-keys list
|
||||
/unraid-keys get [key-id]
|
||||
/unraid-keys create "MCP Server Key" "Key for unraid-mcp integration"
|
||||
/unraid-keys update [key-id] "Updated Name" "Updated description"
|
||||
```
|
||||
|
||||
**Key Format:** PrefixedID (`hex64:suffix`)
|
||||
|
||||
**IMPORTANT:**
|
||||
- Deleted keys are immediately revoked and cannot be recovered
|
||||
- Store new keys securely - they're only shown once during creation
|
||||
- Set expiry dates for keys used in automation
|
||||
|
||||
Use the tool to execute the requested API key operation and report the results.
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
description: Manage Unraid system notifications and alerts
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_notifications` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (9)
|
||||
|
||||
**Query Operations:**
|
||||
- `overview` - Summary of notification counts by category
|
||||
- `list` - List all notifications with details
|
||||
- `warnings` - List only warning/error notifications
|
||||
- `unread` - List unread notifications only
|
||||
|
||||
**Management Operations:**
|
||||
- `create` - Create a new notification (requires title, message, severity)
|
||||
- `archive` - Archive a specific notification (requires notification_id)
|
||||
- `archive_all` - Archive all current notifications
|
||||
|
||||
**⚠️ Destructive Operations:**
|
||||
- `delete` - Permanently delete a notification (requires notification_id + confirmation)
|
||||
- `delete_archived` - Permanently delete all archived notifications (requires confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-notifications overview
|
||||
/unraid-notifications list
|
||||
/unraid-notifications warnings
|
||||
/unraid-notifications unread
|
||||
/unraid-notifications create "Test Alert" "This is a test" normal
|
||||
/unraid-notifications archive [notification-id]
|
||||
/unraid-notifications archive_all
|
||||
```
|
||||
|
||||
**Severity Levels:** `normal`, `warning`, `alert`, `critical`
|
||||
|
||||
**IMPORTANT:** Delete operations are permanent and cannot be undone.
|
||||
|
||||
Use the tool to execute the requested notification operation and present results clearly.
|
||||
@@ -1,32 +0,0 @@
|
||||
---
|
||||
description: Manage Rclone cloud storage remotes on Unraid
|
||||
argument-hint: [action] [remote-name]
|
||||
---
|
||||
|
||||
Execute the `unraid_rclone` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (4)
|
||||
|
||||
**Query Operations:**
|
||||
- `list_remotes` - List all configured Rclone remotes
|
||||
- `config_form` - Get configuration form for a remote type (requires remote_type)
|
||||
|
||||
**Management Operations:**
|
||||
- `create_remote` - Create a new Rclone remote (requires remote_name, remote_type, config)
|
||||
|
||||
**⚠️ Destructive:**
|
||||
- `delete_remote` - Permanently delete a remote (requires remote_name + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-rclone list_remotes
|
||||
/unraid-rclone config_form s3
|
||||
/unraid-rclone create_remote mybackup s3 {"access_key":"...","secret_key":"..."}
|
||||
```
|
||||
|
||||
**Supported Remote Types:** s3, dropbox, google-drive, onedrive, backblaze, ftp, sftp, webdav, etc.
|
||||
|
||||
**IMPORTANT:** Deleting a remote does NOT delete cloud data, only the local configuration.
|
||||
|
||||
Use the tool to execute the requested Rclone operation and report the results.
|
||||
@@ -1,33 +0,0 @@
|
||||
---
|
||||
description: Query Unraid storage, shares, and disk information
|
||||
argument-hint: [action] [additional-args]
|
||||
---
|
||||
|
||||
Execute the `unraid_storage` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (6)
|
||||
|
||||
**Shares & Disks:**
|
||||
- `shares` - List all user shares with sizes and allocation
|
||||
- `disks` - List all disks in the array
|
||||
- `disk_details` - Get detailed info for a specific disk (requires disk identifier)
|
||||
- `unassigned` - List unassigned devices
|
||||
|
||||
**Logs:**
|
||||
- `log_files` - List available system log files
|
||||
- `logs` - Read log file contents (requires log file path)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-storage shares
|
||||
/unraid-storage disks
|
||||
/unraid-storage disk_details disk1
|
||||
/unraid-storage unassigned
|
||||
/unraid-storage log_files
|
||||
/unraid-storage logs /var/log/syslog
|
||||
```
|
||||
|
||||
**Note:** Log file paths must start with `/var/log/`, `/boot/logs/`, or `/mnt/`
|
||||
|
||||
Use the tool to retrieve the requested storage information and present it clearly.
|
||||
@@ -1,31 +0,0 @@
|
||||
---
|
||||
description: Query current authenticated Unraid user
|
||||
argument-hint: [action]
|
||||
---
|
||||
|
||||
Execute the `unraid_users` MCP tool with action: `$1`
|
||||
|
||||
## Available Actions (1)
|
||||
|
||||
**Query Operation:**
|
||||
- `me` - Get current authenticated user info (id, name, description, roles)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/users me
|
||||
```
|
||||
|
||||
## API Limitation
|
||||
|
||||
⚠️ **Note:** The Unraid GraphQL API does not support user management operations. Only the `me` query is available, which returns information about the currently authenticated user (the API key holder).
|
||||
|
||||
**Not supported:**
|
||||
- Listing all users
|
||||
- Getting other user details
|
||||
- Adding/deleting users
|
||||
- Cloud/remote access queries
|
||||
|
||||
For user management, use the Unraid web UI.
|
||||
|
||||
Use the tool to query the current authenticated user and report the results.
|
||||
@@ -1,41 +0,0 @@
|
||||
---
|
||||
description: Manage virtual machines on Unraid
|
||||
argument-hint: [action] [vm-id]
|
||||
---
|
||||
|
||||
Execute the `unraid_vm` MCP tool with action: `$1` and vm_id: `$2`
|
||||
|
||||
## Available Actions (9)
|
||||
|
||||
**Query Operations:**
|
||||
- `list` - List all VMs with status and resource allocation
|
||||
- `details` - Get detailed info for a VM (requires vm_id)
|
||||
|
||||
**Lifecycle Operations:**
|
||||
- `start` - Start a stopped VM (requires vm_id)
|
||||
- `stop` - Gracefully stop a running VM (requires vm_id)
|
||||
- `pause` - Pause a running VM (requires vm_id)
|
||||
- `resume` - Resume a paused VM (requires vm_id)
|
||||
- `reboot` - Gracefully reboot a VM (requires vm_id)
|
||||
|
||||
**⚠️ Destructive Operations:**
|
||||
- `force_stop` - Forcefully power off VM (like pulling power cord - requires vm_id + confirmation)
|
||||
- `reset` - Hard reset VM (power cycle without graceful shutdown - requires vm_id + confirmation)
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
/unraid-vm list
|
||||
/unraid-vm details windows-10
|
||||
/unraid-vm start ubuntu-server
|
||||
/unraid-vm stop windows-10
|
||||
/unraid-vm pause debian-vm
|
||||
/unraid-vm resume debian-vm
|
||||
/unraid-vm reboot ubuntu-server
|
||||
```
|
||||
|
||||
**VM Identification:** Use VM ID (PrefixedID format: `hex64:suffix`)
|
||||
|
||||
**IMPORTANT:** `force_stop` and `reset` bypass graceful shutdown and may corrupt VM filesystem. Use `stop` instead for safe shutdowns.
|
||||
|
||||
Use the tool to execute the requested VM operation and report the results.
|
||||
@@ -5,31 +5,43 @@ services:
|
||||
dockerfile: Dockerfile
|
||||
container_name: unraid-mcp
|
||||
restart: unless-stopped
|
||||
read_only: true
|
||||
cap_drop:
|
||||
- ALL
|
||||
tmpfs:
|
||||
- /tmp:noexec,nosuid,size=64m
|
||||
- /app/logs:noexec,nosuid,size=16m
|
||||
- /app/.cache/logs:noexec,nosuid,size=8m
|
||||
ports:
|
||||
# HostPort:ContainerPort (maps to UNRAID_MCP_PORT inside the container, default 6970)
|
||||
# Change the host port (left side) if 6970 is already in use on your host
|
||||
- "${UNRAID_MCP_PORT:-6970}:${UNRAID_MCP_PORT:-6970}"
|
||||
env_file:
|
||||
- path: ${HOME}/.unraid-mcp/.env
|
||||
required: false # Don't fail if file missing; environment: block below takes over
|
||||
environment:
|
||||
# Core API Configuration (Required)
|
||||
- UNRAID_API_URL=${UNRAID_API_URL}
|
||||
- UNRAID_API_KEY=${UNRAID_API_KEY}
|
||||
|
||||
# Sourced from ~/.unraid-mcp/.env via env_file above (if present),
|
||||
# or set these directly here. The :? syntax fails fast if unset.
|
||||
- UNRAID_API_URL=${UNRAID_API_URL:?UNRAID_API_URL is required}
|
||||
- UNRAID_API_KEY=${UNRAID_API_KEY:?UNRAID_API_KEY is required}
|
||||
|
||||
# MCP Server Settings
|
||||
- UNRAID_MCP_PORT=${UNRAID_MCP_PORT:-6970}
|
||||
- UNRAID_MCP_HOST=${UNRAID_MCP_HOST:-0.0.0.0}
|
||||
- UNRAID_MCP_TRANSPORT=${UNRAID_MCP_TRANSPORT:-streamable-http}
|
||||
|
||||
|
||||
# SSL Configuration
|
||||
- UNRAID_VERIFY_SSL=${UNRAID_VERIFY_SSL:-true}
|
||||
|
||||
|
||||
# Logging Configuration
|
||||
- UNRAID_MCP_LOG_LEVEL=${UNRAID_MCP_LOG_LEVEL:-INFO}
|
||||
- UNRAID_MCP_LOG_FILE=${UNRAID_MCP_LOG_FILE:-unraid-mcp.log}
|
||||
|
||||
|
||||
# Real-time Subscription Configuration
|
||||
- UNRAID_AUTO_START_SUBSCRIPTIONS=${UNRAID_AUTO_START_SUBSCRIPTIONS:-true}
|
||||
- UNRAID_MAX_RECONNECT_ATTEMPTS=${UNRAID_MAX_RECONNECT_ATTEMPTS:-10}
|
||||
|
||||
|
||||
# Optional: Custom log file path for subscription auto-start diagnostics
|
||||
- UNRAID_AUTOSTART_LOG_PATH=${UNRAID_AUTOSTART_LOG_PATH}
|
||||
# Optional: If you want to mount a specific directory for logs (ensure UNRAID_MCP_LOG_FILE points within this mount)
|
||||
|
||||
@@ -1,240 +1,258 @@
|
||||
# Destructive Actions Inventory
|
||||
# Destructive Actions
|
||||
|
||||
This file lists all destructive actions across the unraid-mcp tools. Fill in the "Testing Strategy" column to specify how each should be tested in the mcporter integration test suite.
|
||||
**Last Updated:** 2026-03-16
|
||||
**Total destructive actions:** 12 across 8 domains (single `unraid` tool)
|
||||
|
||||
**Last Updated:** 2026-02-15
|
||||
All destructive actions require `confirm=True` at the call site. There is no additional environment variable gate — `confirm` is the sole guard.
|
||||
|
||||
> **mcporter commands below** use `$MCP_URL` (default: `http://localhost:6970/mcp`). Run `test-actions.sh` for automated non-destructive coverage; destructive actions are always skipped there and tested manually per the strategies below.
|
||||
>
|
||||
> **Calling convention (v1.0.0+):** All operations use the single `unraid` tool with `action` (domain) + `subaction` (operation). For example:
|
||||
> `mcporter call --http-url "$MCP_URL" --tool unraid --args '{"action":"docker","subaction":"list"}'`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
## `array`
|
||||
|
||||
- **Total Destructive Actions:** 8 (after removing 4 array operations)
|
||||
- **Tools with Destructive Actions:** 6
|
||||
- **Environment Variable Gates:** 6 (one per tool)
|
||||
### `stop_array` — Stop the Unraid array
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Stopping the array unmounts all shares and can interrupt running containers and VMs accessing array data. Test via `tests/safety/` confirming the `confirm=False` guard raises `ToolError`. Do not run live unless all containers and VMs are shut down first.
|
||||
|
||||
---
|
||||
|
||||
## Destructive Actions by Tool
|
||||
|
||||
### 1. Docker (1 action)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `remove` | Permanently delete a Docker container | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_DOCKER_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Container must be stopped first
|
||||
- Removes container config and any non-volume data
|
||||
- Cannot be undone
|
||||
|
||||
---
|
||||
|
||||
### 2. Virtual Machines (2 actions)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `force_stop` | Forcefully power off a running VM (equivalent to pulling power cord) | **MEDIUM** - Severe but recoverable, risk of data corruption | `UNRAID_ALLOW_VM_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
| `reset` | Hard reset a VM (power cycle without graceful shutdown) | **MEDIUM** - Severe but recoverable, risk of data corruption | `UNRAID_ALLOW_VM_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Both bypass graceful shutdown procedures
|
||||
- May corrupt VM filesystem if used during write operations
|
||||
- Use `stop` action instead for graceful shutdown
|
||||
|
||||
---
|
||||
|
||||
### 3. Notifications (2 actions)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `delete` | Permanently delete a notification | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_NOTIFICATIONS_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
| `delete_archived` | Permanently delete all archived notifications | **HIGH** - Bulk data loss, irreversible | `UNRAID_ALLOW_NOTIFICATIONS_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Cannot recover deleted notifications
|
||||
- `delete_archived` affects ALL archived notifications (bulk operation)
|
||||
|
||||
---
|
||||
|
||||
### 4. Rclone (1 action)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `delete_remote` | Permanently delete an rclone remote configuration | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_RCLONE_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Removes cloud storage connection configuration
|
||||
- Does NOT delete data in the remote storage
|
||||
- Must reconfigure remote from scratch if deleted
|
||||
|
||||
---
|
||||
|
||||
### 5. Users (1 action)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `delete` | Permanently delete a user account | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_USERS_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Removes user account and permissions
|
||||
- Cannot delete the root user
|
||||
- User's data may remain but become orphaned
|
||||
|
||||
---
|
||||
|
||||
### 6. API Keys (1 action)
|
||||
|
||||
| Action | Description | Risk Level | Env Var Gate | Testing Strategy |
|
||||
|--------|-------------|------------|--------------|------------------|
|
||||
| `delete` | Permanently delete an API key | **HIGH** - Data loss, irreversible, breaks integrations | `UNRAID_ALLOW_KEYS_DESTRUCTIVE` | **TODO: Specify testing approach** |
|
||||
|
||||
**Notes:**
|
||||
- Immediately revokes API key access
|
||||
- Will break any integrations using the deleted key
|
||||
- Cannot be undone - must create new key
|
||||
|
||||
---
|
||||
|
||||
## Removed Actions (No Longer Exposed)
|
||||
|
||||
These actions were previously marked as destructive but have been **removed** from the array tool per the implementation plan:
|
||||
|
||||
| Action | Former Risk Level | Reason for Removal |
|
||||
|--------|-------------------|-------------------|
|
||||
| `start` | CRITICAL | System-wide impact - should not be exposed via MCP |
|
||||
| `stop` | CRITICAL | System-wide impact - should not be exposed via MCP |
|
||||
| `shutdown` | CRITICAL | System-wide impact - could cause data loss |
|
||||
| `reboot` | CRITICAL | System-wide impact - disrupts all services |
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy Options
|
||||
|
||||
Choose one of the following for each action in the "Testing Strategy" column:
|
||||
|
||||
### Option 1: Mock/Validation Only
|
||||
- Test parameter validation
|
||||
- Test `confirm=True` requirement
|
||||
- Test env var gate requirement
|
||||
- **DO NOT** execute the actual action
|
||||
|
||||
### Option 2: Dry-Run Testing
|
||||
- Test with `confirm=false` to verify rejection
|
||||
- Test without env var to verify gate
|
||||
- **DO NOT** execute with both gates passed
|
||||
|
||||
### Option 3: Test Server Execution
|
||||
- Execute on a dedicated test Unraid server (e.g., shart)
|
||||
- Requires pre-created test resources (containers, VMs, notifications)
|
||||
- Verify action succeeds and state changes as expected
|
||||
- Clean up after test
|
||||
|
||||
### Option 4: Manual Test Checklist
|
||||
- Document manual verification steps
|
||||
- Do not automate in mcporter suite
|
||||
- Requires human operator to execute and verify
|
||||
|
||||
### Option 5: Skip Testing
|
||||
- Too dangerous to automate
|
||||
- Rely on unit tests only
|
||||
- Document why testing is skipped
|
||||
|
||||
---
|
||||
|
||||
## Example Testing Strategies
|
||||
|
||||
**Safe approach (recommended for most):**
|
||||
```
|
||||
Option 1: Mock/Validation Only
|
||||
- Verify action requires UNRAID_ALLOW_DOCKER_DESTRUCTIVE=true
|
||||
- Verify action requires confirm=True
|
||||
- Do not execute actual deletion
|
||||
```
|
||||
|
||||
**Comprehensive approach (for test server only):**
|
||||
```
|
||||
Option 3: Test Server Execution on 'shart'
|
||||
- Create test container 'mcporter-test-container'
|
||||
- Execute remove with gates enabled
|
||||
- Verify container is deleted
|
||||
- Clean up not needed (container already removed)
|
||||
```
|
||||
|
||||
**Hybrid approach:**
|
||||
```
|
||||
Option 1 + Option 4: Mock validation + Manual checklist
|
||||
- Automated: Test gate requirements
|
||||
- Manual: Human operator verifies on test server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Usage in mcporter Tests
|
||||
|
||||
Each tool test script will check the testing strategy:
|
||||
### `remove_disk` — Remove a disk from the array
|
||||
|
||||
```bash
|
||||
# Example from test_docker.sh
|
||||
test_remove_action() {
|
||||
local strategy="TODO: Specify testing approach" # From this file
|
||||
# Prerequisite: array must already be stopped; use a disk you intend to remove
|
||||
|
||||
case "$strategy" in
|
||||
*"Option 1"*|*"Mock"*)
|
||||
# Mock/validation testing
|
||||
test_remove_requires_env_var
|
||||
test_remove_requires_confirm
|
||||
;;
|
||||
*"Option 3"*|*"Test Server"*)
|
||||
# Real execution on test server
|
||||
if [[ "$UNRAID_TEST_SERVER" != "unraid-shart" ]]; then
|
||||
echo "SKIP: Destructive test only runs on test server"
|
||||
return 2
|
||||
fi
|
||||
test_remove_real_execution
|
||||
;;
|
||||
*"Option 5"*|*"Skip"*)
|
||||
echo "SKIP: Testing disabled for this action"
|
||||
return 2
|
||||
;;
|
||||
esac
|
||||
}
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"array","subaction":"remove_disk","disk_id":"<DISK_ID>","confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security Model
|
||||
### `clear_disk_stats` — Clear I/O statistics for a disk (irreversible)
|
||||
|
||||
**Two-tier security for destructive actions:**
|
||||
```bash
|
||||
# Discover disk IDs
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"disk","subaction":"disks"}' --output json
|
||||
|
||||
1. **Environment Variable Gate** (first line of defense)
|
||||
- Must be explicitly enabled per tool
|
||||
- Defaults to disabled (safe)
|
||||
- Prevents accidental execution
|
||||
|
||||
2. **Runtime Confirmation** (second line of defense)
|
||||
- Must pass `confirm=True` in each call
|
||||
- Forces explicit acknowledgment per operation
|
||||
- Cannot be cached or preset
|
||||
|
||||
**Both must pass for execution.**
|
||||
# Clear stats for a specific disk
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"array","subaction":"clear_disk_stats","disk_id":"<DISK_ID>","confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
## `vm`
|
||||
|
||||
1. **Fill in Testing Strategy column** for each action above
|
||||
2. **Create test fixtures** if using Option 3 (test containers, VMs, etc.)
|
||||
3. **Implement tool test scripts** following the specified strategies
|
||||
4. **Document any special setup** required for destructive testing
|
||||
### `force_stop` — Hard power-off a VM (potential data corruption)
|
||||
|
||||
```bash
|
||||
# Prerequisite: create a minimal Alpine test VM in Unraid VM manager
|
||||
# (Alpine ISO, 512MB RAM, no persistent disk, name contains "mcp-test")
|
||||
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"vm","subaction":"list"}' --output json \
|
||||
| python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))")
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"force_stop\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
|
||||
# Verify: VM state should return to stopped
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"details\",\"vm_id\":\"$VID\"}" --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Questions to Consider
|
||||
### `reset` — Hard reset a VM (power cycle without graceful shutdown)
|
||||
|
||||
For each action, ask:
|
||||
- Is this safe to automate on a test server?
|
||||
- Do we have test fixtures/resources available?
|
||||
- What cleanup is required after testing?
|
||||
- What's the blast radius if something goes wrong?
|
||||
- Can we verify the action worked without side effects?
|
||||
```bash
|
||||
# Same minimal Alpine test VM as above
|
||||
VID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"vm","subaction":"list"}' --output json \
|
||||
| python3 -c "import json,sys; vms=json.load(sys.stdin).get('vms',[]); print(next(v.get('uuid',v.get('id','')) for v in vms if 'mcp-test' in v.get('name','')))")
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"vm\",\"subaction\":\"reset\",\"vm_id\":\"$VID\",\"confirm\":true}" --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `notification`
|
||||
|
||||
### `delete` — Permanently delete a notification
|
||||
|
||||
```bash
|
||||
# 1. Create a test notification, then list to get the real stored ID (create response
|
||||
# ID is ULID-based; stored filename uses a unix timestamp, so IDs differ)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"create","title":"mcp-test-delete","subject":"safe to delete","description":"MCP destructive action test","importance":"INFO"}' --output json
|
||||
NID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \
|
||||
| python3 -c "
|
||||
import json,sys
|
||||
notifs=json.load(sys.stdin).get('notifications',[])
|
||||
matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-delete']
|
||||
print(matches[0] if matches else '')")
|
||||
|
||||
# 2. Delete it (notification_type required)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"notification\",\"subaction\":\"delete\",\"notification_id\":\"$NID\",\"notification_type\":\"UNREAD\",\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list"}' --output json | python3 -c \
|
||||
"import json,sys; ns=[n for n in json.load(sys.stdin).get('notifications',[]) if 'mcp-test' in n.get('title','')]; print('clean' if not ns else ns)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### `delete_archived` — Wipe all archived notifications (bulk, irreversible)
|
||||
|
||||
```bash
|
||||
# 1. Create and archive a test notification
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"create","title":"mcp-test-archive-wipe","subject":"archive me","description":"safe to delete","importance":"INFO"}' --output json
|
||||
AID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"list","notification_type":"UNREAD"}' --output json \
|
||||
| python3 -c "
|
||||
import json,sys
|
||||
notifs=json.load(sys.stdin).get('notifications',[])
|
||||
matches=[n['id'] for n in reversed(notifs) if n.get('title')=='mcp-test-archive-wipe']
|
||||
print(matches[0] if matches else '')")
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"notification\",\"subaction\":\"archive\",\"notification_id\":\"$AID\"}" --output json
|
||||
|
||||
# 2. Wipe all archived
|
||||
# NOTE: this deletes ALL archived notifications, not just the test one
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"notification","subaction":"delete_archived","confirm":true}' --output json
|
||||
```
|
||||
|
||||
> Run on `shart` if archival history on `tootie` matters.
|
||||
|
||||
---
|
||||
|
||||
## `rclone`
|
||||
|
||||
### `delete_remote` — Remove an rclone remote configuration
|
||||
|
||||
```bash
|
||||
# 1. Create a throwaway local remote (points to /tmp — no real data)
|
||||
# Parameters: name (str), provider_type (str), config_data (dict)
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"create_remote","name":"mcp-test-remote","provider_type":"local","config_data":{"root":"/tmp"}}' --output json
|
||||
|
||||
# 2. Delete it
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"delete_remote","name":"mcp-test-remote","confirm":true}' --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"rclone","subaction":"list_remotes"}' --output json | python3 -c \
|
||||
"import json,sys; remotes=json.load(sys.stdin).get('remotes',[]); print('clean' if 'mcp-test-remote' not in remotes else 'FOUND — cleanup failed')"
|
||||
```
|
||||
|
||||
> Note: `delete_remote` removes the config only — it does NOT delete data in the remote storage.
|
||||
|
||||
---
|
||||
|
||||
## `key`
|
||||
|
||||
### `delete` — Delete an API key (immediately revokes access)
|
||||
|
||||
```bash
|
||||
# 1. Create a test key (names cannot contain hyphens; ID is at key.id)
|
||||
KID=$(mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"key","subaction":"create","name":"mcp test key","roles":["VIEWER"]}' --output json \
|
||||
| python3 -c "import json,sys; print(json.load(sys.stdin).get('key',{}).get('id',''))")
|
||||
|
||||
# 2. Delete it
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args "{\"action\":\"key\",\"subaction\":\"delete\",\"key_id\":\"$KID\",\"confirm\":true}" --output json
|
||||
|
||||
# 3. Verify
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"key","subaction":"list"}' --output json | python3 -c \
|
||||
"import json,sys; ks=json.load(sys.stdin).get('keys',[]); print('clean' if not any('mcp test key' in k.get('name','') for k in ks) else 'FOUND — cleanup failed')"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `disk`
|
||||
|
||||
### `flash_backup` — Rclone backup of flash drive (overwrites destination)
|
||||
|
||||
```bash
|
||||
# Prerequisite: create a dedicated test remote pointing away from real backup destination
|
||||
# (use rclone create_remote first, or configure mcp-test-remote manually)
|
||||
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"disk","subaction":"flash_backup","remote_name":"mcp-test-remote","source_path":"/boot","destination_path":"/flash-backup-test","confirm":true}' --output json
|
||||
```
|
||||
|
||||
> Never point at the same destination as your real flash backup. Create a dedicated `mcp-test-remote` (see `rclone: delete_remote` above for provisioning pattern).
|
||||
|
||||
---
|
||||
|
||||
## `setting`
|
||||
|
||||
### `configure_ups` — Overwrite UPS monitoring configuration
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
A wrong configuration can break UPS integration. If live testing is required: read the current config via `unraid(action="system", subaction="ups_config")`, save the values, re-apply the identical values (a no-op), and verify the response matches what was read. Guard behavior is covered by the `tests/safety/` suite.
|
||||
|
||||
---
|
||||
|
||||
## `plugin`
|
||||
|
||||
### `remove` — Uninstall a plugin (irreversible without re-install)
|
||||
|
||||
**Strategy: mock/safety audit only.**
|
||||
Removing a plugin cannot be undone without a full re-install. Verify via `tests/safety/` that the `confirm=False` guard raises `ToolError`. Do not run live unless the plugin is intentionally being uninstalled.
|
||||
|
||||
```bash
|
||||
# If live testing is necessary (intentional removal only):
|
||||
mcporter call --http-url "$MCP_URL" --tool unraid \
|
||||
--args '{"action":"plugin","subaction":"remove","names":["<plugin-name>"],"confirm":true}' --output json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Safety Audit (Automated)
|
||||
|
||||
The `tests/safety/` directory contains pytest tests that verify:
|
||||
- Every destructive action raises `ToolError` when called with `confirm=False`
|
||||
- Every destructive action raises `ToolError` when called without the `confirm` parameter
|
||||
- The `_*_DESTRUCTIVE` sets in `unraid_mcp/tools/unraid.py` stay in sync with the actions listed above
|
||||
- No GraphQL request reaches the network layer when confirmation is missing (`TestNoGraphQLCallsWhenUnconfirmed`)
|
||||
- Non-destructive actions never require `confirm` (`TestNonDestructiveActionsNeverRequireConfirm`)
|
||||
|
||||
These run as part of the standard test suite:
|
||||
|
||||
```bash
|
||||
uv run pytest tests/safety/ -v
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Summary Table
|
||||
|
||||
| Domain (`action=`) | Subaction | Strategy | Target Server |
|
||||
|--------------------|-----------|----------|---------------|
|
||||
| `array` | `stop_array` | Mock/safety audit only | — |
|
||||
| `array` | `remove_disk` | Array must be stopped; use intended disk | either |
|
||||
| `array` | `clear_disk_stats` | Discover disk ID → clear | either |
|
||||
| `vm` | `force_stop` | Minimal Alpine test VM | either |
|
||||
| `vm` | `reset` | Minimal Alpine test VM | either |
|
||||
| `notification` | `delete` | Create notification → destroy | either |
|
||||
| `notification` | `delete_archived` | Create → archive → wipe | shart preferred |
|
||||
| `rclone` | `delete_remote` | Create local:/tmp remote → destroy | either |
|
||||
| `key` | `delete` | Create test key → destroy | either |
|
||||
| `disk` | `flash_backup` | Dedicated test remote, isolated path | either |
|
||||
| `setting` | `configure_ups` | Mock/safety audit only | — |
|
||||
| `plugin` | `remove` | Mock/safety audit only | — |
|
||||
|
||||
@@ -14,10 +14,10 @@ The marketplace catalog that lists all available plugins in this repository.
|
||||
- Plugin catalog with the "unraid" skill
|
||||
- Categories and tags for discoverability
|
||||
|
||||
### 2. Plugin Manifest (`skills/unraid/.claude-plugin/plugin.json`)
|
||||
### 2. Plugin Manifest (`.claude-plugin/plugin.json`)
|
||||
The individual plugin configuration for the Unraid skill.
|
||||
|
||||
**Location:** `skills/unraid/.claude-plugin/plugin.json`
|
||||
**Location:** `.claude-plugin/plugin.json`
|
||||
|
||||
**Contents:**
|
||||
- Plugin name, version, author
|
||||
@@ -73,12 +73,11 @@ Users can also install from a specific commit or branch:
|
||||
|
||||
```text
|
||||
unraid-mcp/
|
||||
├── .claude-plugin/ # Marketplace manifest
|
||||
│ ├── marketplace.json
|
||||
│ └── README.md
|
||||
├── skills/unraid/ # Plugin directory
|
||||
│ ├── .claude-plugin/ # Plugin manifest
|
||||
│ │ └── plugin.json
|
||||
├── .claude-plugin/ # Plugin manifest + marketplace manifest
|
||||
│ ├── plugin.json # Plugin configuration (name, version, mcpServers)
|
||||
│ ├── marketplace.json # Marketplace catalog
|
||||
│ └── README.md # Marketplace installation guide
|
||||
├── skills/unraid/ # Skill documentation and helpers
|
||||
│ ├── SKILL.md # Skill documentation
|
||||
│ ├── README.md # Plugin documentation
|
||||
│ ├── examples/ # Example scripts
|
||||
@@ -112,7 +111,7 @@ Before publishing to GitHub:
|
||||
|
||||
2. **Update Version Numbers**
|
||||
- Bump version in `.claude-plugin/marketplace.json`
|
||||
- Bump version in `skills/unraid/.claude-plugin/plugin.json`
|
||||
- Bump version in `.claude-plugin/plugin.json`
|
||||
- Update version in `README.md` if needed
|
||||
|
||||
3. **Test Locally**
|
||||
@@ -123,15 +122,15 @@ Before publishing to GitHub:
|
||||
|
||||
4. **Commit and Push**
|
||||
```bash
|
||||
git add .claude-plugin/ skills/unraid/.claude-plugin/
|
||||
git add .claude-plugin/
|
||||
git commit -m "feat: add Claude Code marketplace configuration"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
5. **Create Release Tag** (Optional)
|
||||
```bash
|
||||
git tag -a v0.2.0 -m "Release v0.2.0"
|
||||
git push origin v0.2.0
|
||||
git tag -a v1.0.0 -m "Release v1.0.0"
|
||||
git push origin v1.0.0
|
||||
```
|
||||
|
||||
## User Experience
|
||||
@@ -159,7 +158,7 @@ After installation, users will:
|
||||
To release a new version:
|
||||
|
||||
1. Make changes to the plugin
|
||||
2. Update version in `skills/unraid/.claude-plugin/plugin.json`
|
||||
2. Update version in `.claude-plugin/plugin.json`
|
||||
3. Update marketplace catalog in `.claude-plugin/marketplace.json`
|
||||
4. Run validation: `./scripts/validate-marketplace.sh`
|
||||
5. Commit and push
|
||||
|
||||
@@ -40,7 +40,7 @@ Before publishing, update the version in `pyproject.toml`:
|
||||
|
||||
```toml
|
||||
[project]
|
||||
version = "0.2.1" # Follow semantic versioning: MAJOR.MINOR.PATCH
|
||||
version = "1.0.0" # Follow semantic versioning: MAJOR.MINOR.PATCH
|
||||
```
|
||||
|
||||
**Semantic Versioning Guide:**
|
||||
@@ -156,7 +156,7 @@ UNRAID_API_URL=https://your-server uvx unraid-mcp-server
|
||||
**Benefits of uvx:**
|
||||
- No installation required
|
||||
- Automatic virtual environment management
|
||||
- Always uses the latest version (or specify version: `uvx unraid-mcp-server@0.2.0`)
|
||||
- Always uses the latest version (or specify version: `uvx unraid-mcp-server@1.0.0`)
|
||||
- Clean execution environment
|
||||
|
||||
## Automation with GitHub Actions (Future)
|
||||
|
||||
928
docs/UNRAID_API_REFERENCE.md
Normal file
928
docs/UNRAID_API_REFERENCE.md
Normal file
@@ -0,0 +1,928 @@
|
||||
# Unraid API v4.29.2 — Complete Reference
|
||||
|
||||
> **Source of truth.** Auto-generated from live GraphQL introspection against tootie (10.1.0.2:31337) on 2026-03-15.
|
||||
> Unraid 7.2.4 · API v4.29.2 · 46 queries · 22 mutations · 11 subscriptions · 156 types
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Authentication](#authentication)
|
||||
- [Scalars & ID Format](#scalars--id-format)
|
||||
- [Queries](#queries)
|
||||
- [System & Server Info](#system--server-info)
|
||||
- [Array & Storage](#array--storage)
|
||||
- [Docker](#docker)
|
||||
- [Virtual Machines](#virtual-machines)
|
||||
- [Notifications](#notifications)
|
||||
- [API Keys & Permissions](#api-keys--permissions)
|
||||
- [Users & Auth](#users--auth)
|
||||
- [RClone / Backup](#rclone--backup)
|
||||
- [UPS / Power](#ups--power)
|
||||
- [Settings & Configuration](#settings--configuration)
|
||||
- [Logs](#logs)
|
||||
- [OIDC / SSO](#oidc--sso)
|
||||
- [Plugins](#plugins)
|
||||
- [Mutations](#mutations)
|
||||
- [Notification Mutations](#notification-mutations)
|
||||
- [Array Mutations](#array-mutations)
|
||||
- [Docker Mutations](#docker-mutations)
|
||||
- [VM Mutations](#vm-mutations)
|
||||
- [Parity Check Mutations](#parity-check-mutations)
|
||||
- [API Key Mutations](#api-key-mutations)
|
||||
- [Customization Mutations](#customization-mutations)
|
||||
- [RClone Mutations](#rclone-mutations)
|
||||
- [Flash Backup](#flash-backup)
|
||||
- [Settings Mutations](#settings-mutations)
|
||||
- [Plugin Mutations](#plugin-mutations)
|
||||
- [Subscriptions](#subscriptions)
|
||||
- [Enums](#enums)
|
||||
- [Input Types](#input-types)
|
||||
- [Object Types (Full Field Reference)](#object-types-full-field-reference)
|
||||
|
||||
---
|
||||
|
||||
## Authentication
|
||||
|
||||
All requests require an API key passed via the `x-api-key` HTTP header:
|
||||
|
||||
```bash
|
||||
curl -k -X POST \
|
||||
-H "x-api-key: YOUR_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"query": "{ online }"}' \
|
||||
https://YOUR-SERVER/graphql
|
||||
```
|
||||
|
||||
**Rate limit:** 100 requests per 10 seconds.
|
||||
|
||||
---
|
||||
|
||||
## Scalars & ID Format
|
||||
|
||||
| Scalar | Description |
|
||||
|--------|-------------|
|
||||
| `PrefixedID` | Server-prefixed ID: `<serverHash>:<localId>`. Inputs are accepted with or without the prefix; outputs always include it. |
|
||||
| `BigInt` | Non-fractional signed whole numbers (for disk sizes in KB, memory in bytes, etc.) |
|
||||
| `DateTime` | ISO 8601 UTC string, e.g. `2026-03-15T09:54:33Z` |
|
||||
| `JSON` | Arbitrary JSON value (used for settings, labels, mount info) |
|
||||
| `Port` | Valid TCP port 0–65535 |
|
||||
|
||||
---
|
||||
|
||||
## Queries
|
||||
|
||||
### System & Server Info
|
||||
|
||||
#### `info` → `Info!`
|
||||
Full hardware and software information. Permission: `READ_ANY` on `INFO`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
info {
|
||||
time
|
||||
baseboard { manufacturer model version serial memMax memSlots }
|
||||
cpu { manufacturer brand cores threads speed speedmax socket topology packages { totalPower power temp } }
|
||||
devices {
|
||||
gpu { type vendorname productid blacklisted }
|
||||
network { iface model vendor mac speed dhcp }
|
||||
pci { type vendorname productname vendorid productid }
|
||||
usb { name bus device }
|
||||
}
|
||||
display { theme unit scale tabs resize wwn total usage text warning critical hot max locale
|
||||
case { url icon error base64 }
|
||||
}
|
||||
memory { layout { size bank type clockSpeed manufacturer formFactor partNum serialNum } }
|
||||
os { platform distro release kernel arch hostname fqdn uptime uefi }
|
||||
system { manufacturer model version serial uuid sku virtual }
|
||||
versions { core { unraid api kernel } packages { openssl node npm pm2 git nginx php docker } }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `vars` → `Vars!`
|
||||
143 system variables including hostname, timezone, array config, share settings, registration state, CSRF token, disk sync parameters, and much more. Permission: `READ_ANY` on `VARS`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
vars {
|
||||
version name timeZone comment security workgroup
|
||||
port portssl portssh useSsl useSsh useTelnet
|
||||
startArray spindownDelay shutdownTimeout
|
||||
shareCount shareSmbCount shareNfsCount
|
||||
regTy regState regTo
|
||||
mdNumDisks mdNumDisabled mdState mdResync
|
||||
configValid configError safeMode csrfToken
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `metrics` → `Metrics!`
|
||||
CPU and memory utilization. Permission: `READ_ANY` on `INFO`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
metrics {
|
||||
cpu { percentTotal cpus { percentTotal percentUser percentSystem percentIdle } }
|
||||
memory { total used free available active buffcache percentTotal swapTotal swapUsed swapFree percentSwapTotal }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `server` → `Server`
|
||||
Local server info. Permission: `READ_ANY` on `SERVERS`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
server { id name status guid apikey wanip lanip localurl remoteurl owner { username url avatar } }
|
||||
}
|
||||
```
|
||||
|
||||
#### `servers` → `[Server!]!`
|
||||
All registered servers (usually just the local one). Permission: `READ_ANY` on `SERVERS`.
|
||||
|
||||
#### `online` → `Boolean!`
|
||||
Simple connectivity check. Permission: `READ_ANY` on `ONLINE`.
|
||||
|
||||
#### `owner` → `Owner!`
|
||||
Server owner info. Permission: `READ_ANY` on `OWNER`. Returns `username`, `url`, `avatar`.
|
||||
|
||||
#### `registration` → `Registration`
|
||||
License info. Permission: `READ_ANY` on `REGISTRATION`. Returns `type`, `state`, `keyFile { location contents }`, `expiration`, `updateExpiration`.
|
||||
|
||||
#### `config` → `Config!`
|
||||
Configuration validity. Permission: `READ_ANY` on `CONFIG`. Returns `valid`, `error`.
|
||||
|
||||
#### `services` → `[Service!]!`
|
||||
Running services. Permission: `READ_ANY` on `SERVICES`. Each: `name`, `online`, `uptime { timestamp }`, `version`.
|
||||
|
||||
#### `flash` → `Flash!`
|
||||
Flash drive info. Permission: `READ_ANY` on `FLASH`. Returns `guid`, `vendor`, `product`.
|
||||
|
||||
#### `customization` → `Customization`
|
||||
UI customization. Permission: `READ_ANY` on `CUSTOMIZATIONS`. Returns `activationCode { ... }`, `partnerInfo { ... }`, `theme { ... }`.
|
||||
|
||||
#### `settings` → `Settings!`
|
||||
All settings including unified form, SSO, and API config.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
settings {
|
||||
unified { dataSchema uiSchema values }
|
||||
sso { oidcProviders { id name clientId issuer scopes } }
|
||||
api { version extraOrigins sandbox plugins }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `isInitialSetup` → `Boolean!`
|
||||
Whether server is in initial setup mode (no permission required).
|
||||
|
||||
---
|
||||
|
||||
### Array & Storage
|
||||
|
||||
#### `array` → `UnraidArray!`
|
||||
Array state with all disks. Permission: `READ_ANY` on `ARRAY`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
array {
|
||||
state
|
||||
capacity { kilobytes { free used total } disks { free used total } }
|
||||
boot { id name device size status temp type }
|
||||
parities { id name device size status temp numReads numWrites numErrors }
|
||||
parityCheckStatus { status progress speed errors duration correcting paused running }
|
||||
disks { id idx name device size status temp fsSize fsFree fsUsed type fsType color isSpinning numReads numWrites numErrors }
|
||||
caches { id name device size status temp fsSize fsFree fsUsed type }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `shares` → `[Share!]!`
|
||||
User shares. Permission: `READ_ANY` on `SHARE`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
shares { id name free used size include exclude cache nameOrig comment allocator splitLevel floor cow color luksStatus }
|
||||
}
|
||||
```
|
||||
|
||||
#### `disks` → `[Disk!]!`
|
||||
Physical disks (hardware-level). Permission: `READ_ANY` on `DISK`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
disks { id device type name vendor size serialNum firmwareRevision interfaceType smartStatus temperature isSpinning
|
||||
partitions { name fsType size } }
|
||||
}
|
||||
```
|
||||
|
||||
#### `disk(id: PrefixedID!)` → `Disk!`
|
||||
Single disk by ID. Permission: `READ_ANY` on `DISK`.
|
||||
|
||||
#### `parityHistory` → `[ParityCheck!]!`
|
||||
Parity check history. Permission: `READ_ANY` on `ARRAY`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
parityHistory { date duration speed status errors progress correcting paused running }
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Docker
|
||||
|
||||
#### `docker` → `Docker!`
|
||||
Container and network queries. Permission: `READ_ANY` on `DOCKER`.
|
||||
|
||||
**Available sub-fields on Docker type:**
|
||||
- `containers(skipCache: Boolean! = false)` → `[DockerContainer!]!`
|
||||
- `networks(skipCache: Boolean! = false)` → `[DockerNetwork!]!`
|
||||
|
||||
**That's it.** No `logs`, no `portConflicts`, no `containerUpdateStatuses`. Only `containers` and `networks`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
docker {
|
||||
containers(skipCache: false) {
|
||||
id names image imageId command created state status autoStart
|
||||
ports { ip privatePort publicPort type }
|
||||
sizeRootFs labels hostConfig { networkMode } networkSettings mounts
|
||||
}
|
||||
networks(skipCache: false) {
|
||||
id name created scope driver enableIPv6
|
||||
internal attachable ingress configOnly
|
||||
ipam containers options labels
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**DockerContainer fields:** `id`, `names`, `image`, `imageId`, `command`, `created`, `ports`, `sizeRootFs`, `labels`, `state` (RUNNING/EXITED), `status`, `hostConfig`, `networkSettings`, `mounts`, `autoStart`.
|
||||
|
||||
**DockerNetwork fields:** `id`, `name`, `created`, `scope`, `driver`, `enableIPv6`, `ipam`, `internal`, `attachable`, `ingress`, `configFrom`, `configOnly`, `containers`, `options`, `labels`.
|
||||
|
||||
---
|
||||
|
||||
### Virtual Machines
|
||||
|
||||
#### `vms` → `Vms!`
|
||||
All VMs. Permission: `READ_ANY` on `VMS`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
vms {
|
||||
domains { id name state uuid }
|
||||
domain { id name state }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**VmState enum:** `NOSTATE`, `RUNNING`, `IDLE`, `PAUSED`, `SHUTDOWN`, `SHUTOFF`, `CRASHED`, `PMSUSPENDED`.
|
||||
|
||||
---
|
||||
|
||||
### Notifications
|
||||
|
||||
#### `notifications` → `Notifications!`
|
||||
Overview and list. Permission: `READ_ANY` on `NOTIFICATIONS`.
|
||||
|
||||
```graphql
|
||||
# Overview (counts by severity)
|
||||
query {
|
||||
notifications {
|
||||
overview {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# List with filter
|
||||
query {
|
||||
notifications {
|
||||
list(filter: { type: UNREAD, offset: 0, limit: 20, importance: WARNING }) {
|
||||
id title subject description importance link type timestamp formattedTimestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**NotificationFilter input:** `type` (UNREAD/ARCHIVE, required), `offset` (required), `limit` (required), `importance` (optional: INFO/WARNING/ALERT).
|
||||
|
||||
---
|
||||
|
||||
### API Keys & Permissions
|
||||
|
||||
#### `apiKeys` → `[ApiKey!]!`
|
||||
All API keys. Permission: `READ_ANY` on `API_KEY`.
|
||||
|
||||
```graphql
|
||||
query { apiKeys { id key name description roles createdAt permissions { resource actions } } }
|
||||
```
|
||||
|
||||
#### `apiKey(id: PrefixedID!)` → `ApiKey`
|
||||
Single API key by ID. Permission: `READ_ANY` on `API_KEY`.
|
||||
|
||||
#### `apiKeyPossibleRoles` → `[Role!]!`
|
||||
Available roles (ADMIN, CONNECT, GUEST, VIEWER). Permission: `READ_ANY` on `PERMISSION`.
|
||||
|
||||
#### `apiKeyPossiblePermissions` → `[Permission!]!`
|
||||
All possible permissions. Permission: `READ_ANY` on `PERMISSION`.
|
||||
|
||||
#### `getPermissionsForRoles(roles: [Role!]!)` → `[Permission!]!`
|
||||
Resolve roles to actual permissions. Permission: `READ_ANY` on `PERMISSION`.
|
||||
|
||||
#### `previewEffectivePermissions(roles: [Role!], permissions: [AddPermissionInput!])` → `[Permission!]!`
|
||||
Preview effective permissions for a role/permission combo. Permission: `READ_ANY` on `PERMISSION`.
|
||||
|
||||
#### `getAvailableAuthActions` → `[AuthAction!]!`
|
||||
All auth actions (CREATE_ANY, READ_OWN, etc.).
|
||||
|
||||
#### `getApiKeyCreationFormSchema` → `ApiKeyFormSettings!`
|
||||
JSON Schema for API key creation form. Permission: `READ_ANY` on `API_KEY`. Returns `dataSchema`, `uiSchema`, `values`.
|
||||
|
||||
---
|
||||
|
||||
### Users & Auth
|
||||
|
||||
#### `me` → `UserAccount!`
|
||||
Current authenticated user. Permission: `READ_ANY` on `ME`.
|
||||
|
||||
```graphql
|
||||
query { me { id name description roles permissions { resource actions } } }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### RClone / Backup
|
||||
|
||||
#### `rclone` → `RCloneBackupSettings!`
|
||||
RClone configuration. Permission: `READ_ANY` on `FLASH`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
rclone {
|
||||
remotes { name type parameters config }
|
||||
drives { name options }
|
||||
configForm(formOptions: { providerType: "drive", showAdvanced: false }) {
|
||||
id dataSchema uiSchema
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### UPS / Power
|
||||
|
||||
#### `upsDevices` → `[UPSDevice!]!`
|
||||
All UPS devices.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
upsDevices {
|
||||
id name model status
|
||||
battery { chargeLevel estimatedRuntime health }
|
||||
power { inputVoltage outputVoltage loadPercentage }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `upsDeviceById(id: String!)` → `UPSDevice`
|
||||
Single UPS by ID.
|
||||
|
||||
#### `upsConfiguration` → `UPSConfiguration!`
|
||||
UPS daemon configuration.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
upsConfiguration {
|
||||
service upsCable customUpsCable upsType device
|
||||
overrideUpsCapacity batteryLevel minutes timeout killUps
|
||||
nisIp netServer upsName modelName
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Logs
|
||||
|
||||
#### `logFiles` → `[LogFile!]!`
|
||||
Available log files. Permission: `READ_ANY` on `LOGS`.
|
||||
|
||||
```graphql
|
||||
query { logFiles { name path size modifiedAt } }
|
||||
```
|
||||
|
||||
#### `logFile(path: String!, lines: Int, startLine: Int)` → `LogFileContent!`
|
||||
Read a log file. Permission: `READ_ANY` on `LOGS`.
|
||||
|
||||
```graphql
|
||||
query { logFile(path: "/var/log/syslog", lines: 100, startLine: 1) { path content totalLines startLine } }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### OIDC / SSO
|
||||
|
||||
#### `isSSOEnabled` → `Boolean!`
|
||||
Whether SSO is enabled.
|
||||
|
||||
#### `publicOidcProviders` → `[PublicOidcProvider!]!`
|
||||
Public OIDC provider info for login buttons. Returns `id`, `name`, `buttonText`, `buttonIcon`, `buttonVariant`, `buttonStyle`.
|
||||
|
||||
#### `oidcProviders` → `[OidcProvider!]!`
|
||||
All configured OIDC providers (admin only). Permission: `READ_ANY` on `CONFIG`.
|
||||
|
||||
#### `oidcProvider(id: PrefixedID!)` → `OidcProvider`
|
||||
Single OIDC provider by ID. Permission: `READ_ANY` on `CONFIG`.
|
||||
|
||||
#### `oidcConfiguration` → `OidcConfiguration!`
|
||||
Full OIDC configuration. Permission: `READ_ANY` on `CONFIG`. Returns `providers` list and `defaultAllowedOrigins`.
|
||||
|
||||
#### `validateOidcSession(token: String!)` → `OidcSessionValidation!`
|
||||
Validate an OIDC session token. Permission: `READ_ANY` on `CONFIG`. Returns `valid`, `username`.
|
||||
|
||||
---
|
||||
|
||||
### Plugins
|
||||
|
||||
#### `plugins` → `[Plugin!]!`
|
||||
Installed plugins. Permission: `READ_ANY` on `CONFIG`.
|
||||
|
||||
```graphql
|
||||
query { plugins { name version hasApiModule hasCliModule } }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Mutations
|
||||
|
||||
### Notification Mutations
|
||||
|
||||
All notification mutations are **root-level** on the Mutation type.
|
||||
|
||||
| Mutation | Args | Returns | Description |
|
||||
|----------|------|---------|-------------|
|
||||
| `createNotification` | `input: NotificationData!` | `Notification!` | Create a new notification |
|
||||
| `archiveNotification` | `id: PrefixedID!` | `Notification!` | Mark as archived |
|
||||
| `archiveNotifications` | `ids: [PrefixedID!]!` | `NotificationOverview!` | Archive multiple |
|
||||
| `archiveAll` | `importance: NotificationImportance` | `NotificationOverview!` | Archive all (optional filter) |
|
||||
| `unreadNotification` | `id: PrefixedID!` | `Notification!` | Mark as unread |
|
||||
| `unarchiveNotifications` | `ids: [PrefixedID!]!` | `NotificationOverview!` | Unarchive multiple |
|
||||
| `unarchiveAll` | `importance: NotificationImportance` | `NotificationOverview!` | Unarchive all (optional filter) |
|
||||
| `deleteNotification` | `id: PrefixedID!`, `type: NotificationType!` | `NotificationOverview!` | Delete one notification |
|
||||
| `deleteArchivedNotifications` | — | `NotificationOverview!` | Delete ALL archived |
|
||||
| `recalculateOverview` | — | `NotificationOverview!` | Recompute counts from disk |
|
||||
|
||||
---
|
||||
|
||||
### Array Mutations
|
||||
|
||||
Nested under `mutation { array { ... } }` → `ArrayMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `setState` | `input: ArrayStateInput!` | `UnraidArray!` | `UPDATE_ANY` on `ARRAY` | Start/stop array (`desiredState: START/STOP`) |
|
||||
| `addDiskToArray` | `input: ArrayDiskInput!` | `UnraidArray!` | `UPDATE_ANY` on `ARRAY` | Add disk to array |
|
||||
| `removeDiskFromArray` | `input: ArrayDiskInput!` | `UnraidArray!` | `UPDATE_ANY` on `ARRAY` | Remove disk (array must be stopped) |
|
||||
| `mountArrayDisk` | `id: PrefixedID!` | `ArrayDisk!` | `UPDATE_ANY` on `ARRAY` | Mount a disk |
|
||||
| `unmountArrayDisk` | `id: PrefixedID!` | `ArrayDisk!` | `UPDATE_ANY` on `ARRAY` | Unmount a disk |
|
||||
| `clearArrayDiskStatistics` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `ARRAY` | Clear disk I/O stats |
|
||||
|
||||
---
|
||||
|
||||
### Docker Mutations
|
||||
|
||||
Nested under `mutation { docker { ... } }` → `DockerMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `start` | `id: PrefixedID!` | `DockerContainer!` | `UPDATE_ANY` on `DOCKER` | Start a container |
|
||||
| `stop` | `id: PrefixedID!` | `DockerContainer!` | `UPDATE_ANY` on `DOCKER` | Stop a container |
|
||||
|
||||
**That's it.** No pause, unpause, remove, update, or organizer mutations exist.
|
||||
|
||||
---
|
||||
|
||||
### VM Mutations
|
||||
|
||||
Nested under `mutation { vm { ... } }` → `VmMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `start` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Start VM |
|
||||
| `stop` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Graceful stop |
|
||||
| `pause` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Pause VM |
|
||||
| `resume` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Resume paused VM |
|
||||
| `forceStop` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Force stop (hard power off) |
|
||||
| `reboot` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Reboot VM |
|
||||
| `reset` | `id: PrefixedID!` | `Boolean!` | `UPDATE_ANY` on `VMS` | Reset VM (hard reboot) |
|
||||
|
||||
---
|
||||
|
||||
### Parity Check Mutations
|
||||
|
||||
Nested under `mutation { parityCheck { ... } }` → `ParityCheckMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `start` | `correct: Boolean!` | `JSON!` | `UPDATE_ANY` on `ARRAY` | Start parity check (correct=true writes fixes) |
|
||||
| `pause` | — | `JSON!` | `UPDATE_ANY` on `ARRAY` | Pause running check |
|
||||
| `resume` | — | `JSON!` | `UPDATE_ANY` on `ARRAY` | Resume paused check |
|
||||
| `cancel` | — | `JSON!` | `UPDATE_ANY` on `ARRAY` | Cancel running check |
|
||||
|
||||
> **Note:** Response types are `JSON!` — this API is marked WIP and types will change.
|
||||
|
||||
---
|
||||
|
||||
### API Key Mutations
|
||||
|
||||
Nested under `mutation { apiKey { ... } }` → `ApiKeyMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `create` | `input: CreateApiKeyInput!` | `ApiKey!` | `CREATE_ANY` on `API_KEY` | Create API key |
|
||||
| `update` | `input: UpdateApiKeyInput!` | `ApiKey!` | `UPDATE_ANY` on `API_KEY` | Update API key |
|
||||
| `delete` | `input: DeleteApiKeyInput!` | `Boolean!` | `DELETE_ANY` on `API_KEY` | Delete one or more keys |
|
||||
| `addRole` | `input: AddRoleForApiKeyInput!` | `Boolean!` | `UPDATE_ANY` on `API_KEY` | Add role to key |
|
||||
| `removeRole` | `input: RemoveRoleFromApiKeyInput!` | `Boolean!` | `UPDATE_ANY` on `API_KEY` | Remove role from key |
|
||||
|
||||
---
|
||||
|
||||
### Customization Mutations
|
||||
|
||||
Nested under `mutation { customization { ... } }` → `CustomizationMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `setTheme` | `theme: ThemeName!` | `Theme!` | `UPDATE_ANY` on `CUSTOMIZATIONS` | Update UI theme (azure/black/gray/white) |
|
||||
|
||||
---
|
||||
|
||||
### RClone Mutations
|
||||
|
||||
Nested under `mutation { rclone { ... } }` → `RCloneMutations!`
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `createRCloneRemote` | `input: CreateRCloneRemoteInput!` | `RCloneRemote!` | `CREATE_ANY` on `FLASH` | Create remote |
|
||||
| `deleteRCloneRemote` | `input: DeleteRCloneRemoteInput!` | `Boolean!` | `DELETE_ANY` on `FLASH` | Delete remote |
|
||||
|
||||
---
|
||||
|
||||
### Flash Backup
|
||||
|
||||
Root-level mutation.
|
||||
|
||||
| Mutation | Args | Returns | Description |
|
||||
|----------|------|---------|-------------|
|
||||
| `initiateFlashBackup` | `input: InitiateFlashBackupInput!` | `FlashBackupStatus!` | Start flash backup to remote |
|
||||
|
||||
**InitiateFlashBackupInput:** `remoteName: String!`, `sourcePath: String!`, `destinationPath: String!`, `options: JSON`
|
||||
|
||||
Returns: `status: String!`, `jobId: String`
|
||||
|
||||
---
|
||||
|
||||
### Settings Mutations
|
||||
|
||||
Root-level mutations.
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `updateSettings` | `input: JSON!` | `UpdateSettingsResponse!` | `UPDATE_ANY` on `CONFIG` | Update server settings |
|
||||
| `configureUps` | `config: UPSConfigInput!` | `Boolean!` | — | Configure UPS daemon |
|
||||
|
||||
**UpdateSettingsResponse:** `restartRequired: Boolean!`, `values: JSON!`, `warnings: [String!]`
|
||||
|
||||
---
|
||||
|
||||
### Plugin Mutations
|
||||
|
||||
Root-level mutations.
|
||||
|
||||
| Mutation | Args | Returns | Permission | Description |
|
||||
|----------|------|---------|------------|-------------|
|
||||
| `addPlugin` | `input: PluginManagementInput!` | `Boolean!` | `UPDATE_ANY` on `CONFIG` | Install plugin(s). Returns false if auto-restart triggered. |
|
||||
| `removePlugin` | `input: PluginManagementInput!` | `Boolean!` | `DELETE_ANY` on `CONFIG` | Remove plugin(s). Returns false if auto-restart triggered. |
|
||||
|
||||
---
|
||||
|
||||
## Subscriptions
|
||||
|
||||
WebSocket-based real-time data (graphql-ws protocol).
|
||||
|
||||
| Subscription | Returns | Permission | Description |
|
||||
|-------------|---------|------------|-------------|
|
||||
| `notificationAdded` | `Notification!` | `READ_ANY` on `NOTIFICATIONS` | New notification created |
|
||||
| `notificationsOverview` | `NotificationOverview!` | `READ_ANY` on `NOTIFICATIONS` | Overview counts change |
|
||||
| `ownerSubscription` | `Owner!` | `READ_ANY` on `OWNER` | Owner info change |
|
||||
| `serversSubscription` | `Server!` | `READ_ANY` on `SERVERS` | Server state change |
|
||||
| `parityHistorySubscription` | `ParityCheck!` | `READ_ANY` on `ARRAY` | Parity check updates |
|
||||
| `arraySubscription` | `UnraidArray!` | `READ_ANY` on `ARRAY` | Array state changes |
|
||||
| `logFile(path: String!)` | `LogFileContent!` | `READ_ANY` on `LOGS` | Live log file tail |
|
||||
| `systemMetricsCpu` | `CpuUtilization!` | `READ_ANY` on `INFO` | CPU utilization stream |
|
||||
| `systemMetricsCpuTelemetry` | `CpuPackages!` | `READ_ANY` on `INFO` | CPU power/temp stream |
|
||||
| `systemMetricsMemory` | `MemoryUtilization!` | `READ_ANY` on `INFO` | Memory utilization stream |
|
||||
| `upsUpdates` | `UPSDevice!` | — | UPS state changes |
|
||||
|
||||
---
|
||||
|
||||
## Enums
|
||||
|
||||
### ArrayDiskFsColor
|
||||
`GREEN_ON` · `GREEN_BLINK` · `BLUE_ON` · `BLUE_BLINK` · `YELLOW_ON` · `YELLOW_BLINK` · `RED_ON` · `RED_OFF` · `GREY_OFF`
|
||||
|
||||
### ArrayDiskStatus
|
||||
`DISK_NP` · `DISK_OK` · `DISK_NP_MISSING` · `DISK_INVALID` · `DISK_WRONG` · `DISK_DSBL` · `DISK_NP_DSBL` · `DISK_DSBL_NEW` · `DISK_NEW`
|
||||
|
||||
### ArrayDiskType
|
||||
`DATA` · `PARITY` · `FLASH` · `CACHE`
|
||||
|
||||
### ArrayState
|
||||
`STARTED` · `STOPPED` · `NEW_ARRAY` · `RECON_DISK` · `DISABLE_DISK` · `SWAP_DSBL` · `INVALID_EXPANSION` · `PARITY_NOT_BIGGEST` · `TOO_MANY_MISSING_DISKS` · `NEW_DISK_TOO_SMALL` · `NO_DATA_DISKS`
|
||||
|
||||
### ArrayStateInputState
|
||||
`START` · `STOP`
|
||||
|
||||
### AuthAction
|
||||
`CREATE_ANY` · `CREATE_OWN` · `READ_ANY` · `READ_OWN` · `UPDATE_ANY` · `UPDATE_OWN` · `DELETE_ANY` · `DELETE_OWN`
|
||||
|
||||
### AuthorizationOperator
|
||||
`EQUALS` · `CONTAINS` · `ENDS_WITH` · `STARTS_WITH`
|
||||
|
||||
### AuthorizationRuleMode
|
||||
`OR` · `AND`
|
||||
|
||||
### ConfigErrorState
|
||||
`UNKNOWN_ERROR` · `INELIGIBLE` · `INVALID` · `NO_KEY_SERVER` · `WITHDRAWN`
|
||||
|
||||
### ContainerPortType
|
||||
`TCP` · `UDP`
|
||||
|
||||
### ContainerState
|
||||
`RUNNING` · `EXITED`
|
||||
|
||||
### DiskFsType
|
||||
`XFS` · `BTRFS` · `VFAT` · `ZFS` · `EXT4` · `NTFS`
|
||||
|
||||
### DiskInterfaceType
|
||||
`SAS` · `SATA` · `USB` · `PCIE` · `UNKNOWN`
|
||||
|
||||
### DiskSmartStatus
|
||||
`OK` · `UNKNOWN`
|
||||
|
||||
### NotificationImportance
|
||||
`ALERT` · `INFO` · `WARNING`
|
||||
|
||||
### NotificationType
|
||||
`UNREAD` · `ARCHIVE`
|
||||
|
||||
### ParityCheckStatus
|
||||
`NEVER_RUN` · `RUNNING` · `PAUSED` · `COMPLETED` · `CANCELLED` · `FAILED`
|
||||
|
||||
### RegistrationState
|
||||
`TRIAL` · `BASIC` · `PLUS` · `PRO` · `STARTER` · `UNLEASHED` · `LIFETIME` · `EEXPIRED` · `EGUID` · `EGUID1` · `ETRIAL` · `ENOKEYFILE` · `ENOKEYFILE1` · `ENOKEYFILE2` · `ENOFLASH` · `ENOFLASH1` · `ENOFLASH2` · `ENOFLASH3` · `ENOFLASH4` · `ENOFLASH5` · `ENOFLASH6` · `ENOFLASH7` · `EBLACKLISTED` · `EBLACKLISTED1` · `EBLACKLISTED2` · `ENOCONN`
|
||||
|
||||
### Resource
|
||||
`ACTIVATION_CODE` · `API_KEY` · `ARRAY` · `CLOUD` · `CONFIG` · `CONNECT` · `CONNECT__REMOTE_ACCESS` · `CUSTOMIZATIONS` · `DASHBOARD` · `DISK` · `DISPLAY` · `DOCKER` · `FLASH` · `INFO` · `LOGS` · `ME` · `NETWORK` · `NOTIFICATIONS` · `ONLINE` · `OS` · `OWNER` · `PERMISSION` · `REGISTRATION` · `SERVERS` · `SERVICES` · `SHARE` · `VARS` · `VMS` · `WELCOME`
|
||||
|
||||
### Role
|
||||
- `ADMIN` — Full administrative access
|
||||
- `CONNECT` — Internal role for Unraid Connect
|
||||
- `GUEST` — Basic read access (user profile only)
|
||||
- `VIEWER` — Read-only access to all resources
|
||||
|
||||
### ServerStatus
|
||||
`ONLINE` · `OFFLINE` · `NEVER_CONNECTED`
|
||||
|
||||
### Temperature
|
||||
`CELSIUS` · `FAHRENHEIT`
|
||||
|
||||
### ThemeName
|
||||
`azure` · `black` · `gray` · `white`
|
||||
|
||||
### UPSCableType
|
||||
`USB` · `SIMPLE` · `SMART` · `ETHER` · `CUSTOM`
|
||||
|
||||
### UPSKillPower
|
||||
`YES` · `NO`
|
||||
|
||||
### UPSServiceState
|
||||
`ENABLE` · `DISABLE`
|
||||
|
||||
### UPSType
|
||||
`USB` · `APCSMART` · `NET` · `SNMP` · `DUMB` · `PCNET` · `MODBUS`
|
||||
|
||||
### UpdateStatus
|
||||
`UP_TO_DATE` · `UPDATE_AVAILABLE` · `REBUILD_READY` · `UNKNOWN`
|
||||
|
||||
### VmState
|
||||
`NOSTATE` · `RUNNING` · `IDLE` · `PAUSED` · `SHUTDOWN` · `SHUTOFF` · `CRASHED` · `PMSUSPENDED`
|
||||
|
||||
### registrationType
|
||||
`BASIC` · `PLUS` · `PRO` · `STARTER` · `UNLEASHED` · `LIFETIME` · `INVALID` · `TRIAL`
|
||||
|
||||
---
|
||||
|
||||
## Input Types
|
||||
|
||||
### NotificationData
|
||||
```graphql
|
||||
input NotificationData {
|
||||
title: String!
|
||||
subject: String!
|
||||
description: String!
|
||||
importance: NotificationImportance!
|
||||
link: String
|
||||
}
|
||||
```
|
||||
|
||||
### NotificationFilter
|
||||
```graphql
|
||||
input NotificationFilter {
|
||||
importance: NotificationImportance # optional filter
|
||||
type: NotificationType! # UNREAD or ARCHIVE
|
||||
offset: Int!
|
||||
limit: Int!
|
||||
}
|
||||
```
|
||||
|
||||
### ArrayStateInput
|
||||
```graphql
|
||||
input ArrayStateInput {
|
||||
desiredState: ArrayStateInputState! # START or STOP
|
||||
}
|
||||
```
|
||||
|
||||
### ArrayDiskInput
|
||||
```graphql
|
||||
input ArrayDiskInput {
|
||||
id: PrefixedID!
|
||||
slot: Int # optional slot number
|
||||
}
|
||||
```
|
||||
|
||||
### CreateApiKeyInput
|
||||
```graphql
|
||||
input CreateApiKeyInput {
|
||||
name: String!
|
||||
description: String
|
||||
roles: [Role!]
|
||||
permissions: [AddPermissionInput!]
|
||||
overwrite: Boolean # replace existing key with same name
|
||||
}
|
||||
```
|
||||
|
||||
### UpdateApiKeyInput
|
||||
```graphql
|
||||
input UpdateApiKeyInput {
|
||||
id: PrefixedID!
|
||||
name: String
|
||||
description: String
|
||||
roles: [Role!]
|
||||
permissions: [AddPermissionInput!]
|
||||
}
|
||||
```
|
||||
|
||||
### DeleteApiKeyInput
|
||||
```graphql
|
||||
input DeleteApiKeyInput {
|
||||
ids: [PrefixedID!]!
|
||||
}
|
||||
```
|
||||
|
||||
### AddPermissionInput
|
||||
```graphql
|
||||
input AddPermissionInput {
|
||||
resource: Resource!
|
||||
actions: [AuthAction!]!
|
||||
}
|
||||
```
|
||||
|
||||
### AddRoleForApiKeyInput / RemoveRoleFromApiKeyInput
|
||||
```graphql
|
||||
input AddRoleForApiKeyInput {
|
||||
apiKeyId: PrefixedID!
|
||||
role: Role!
|
||||
}
|
||||
input RemoveRoleFromApiKeyInput {
|
||||
apiKeyId: PrefixedID!
|
||||
role: Role!
|
||||
}
|
||||
```
|
||||
|
||||
### CreateRCloneRemoteInput
|
||||
```graphql
|
||||
input CreateRCloneRemoteInput {
|
||||
name: String!
|
||||
type: String! # e.g. "drive", "s3", "sftp"
|
||||
parameters: JSON! # provider-specific config
|
||||
}
|
||||
```
|
||||
|
||||
### DeleteRCloneRemoteInput
|
||||
```graphql
|
||||
input DeleteRCloneRemoteInput {
|
||||
name: String!
|
||||
}
|
||||
```
|
||||
|
||||
### RCloneConfigFormInput
|
||||
```graphql
|
||||
input RCloneConfigFormInput {
|
||||
providerType: String
|
||||
showAdvanced: Boolean = false
|
||||
parameters: JSON
|
||||
}
|
||||
```
|
||||
|
||||
### InitiateFlashBackupInput
|
||||
```graphql
|
||||
input InitiateFlashBackupInput {
|
||||
remoteName: String! # configured remote name
|
||||
sourcePath: String! # e.g. "/boot"
|
||||
destinationPath: String! # remote destination path
|
||||
options: JSON # e.g. {"--dry-run": true}
|
||||
}
|
||||
```
|
||||
|
||||
### UPSConfigInput
|
||||
```graphql
|
||||
input UPSConfigInput {
|
||||
service: UPSServiceState # ENABLE or DISABLE
|
||||
upsCable: UPSCableType # USB, SIMPLE, SMART, ETHER, CUSTOM
|
||||
customUpsCable: String # only when upsCable=CUSTOM
|
||||
upsType: UPSType # USB, APCSMART, NET, SNMP, DUMB, PCNET, MODBUS
|
||||
device: String # /dev/ttyUSB0 or IP:port
|
||||
overrideUpsCapacity: Int # watts
|
||||
batteryLevel: Int # 0-100 percent shutdown threshold
|
||||
minutes: Int # runtime minutes shutdown threshold
|
||||
timeout: Int # seconds, 0=disable
|
||||
killUps: UPSKillPower # YES or NO
|
||||
}
|
||||
```
|
||||
|
||||
### PluginManagementInput
|
||||
```graphql
|
||||
input PluginManagementInput {
|
||||
names: [String!]!
|
||||
bundled: Boolean! = false # treat as bundled plugins
|
||||
restart: Boolean! = true # auto-restart API after operation
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Object Types (Full Field Reference)
|
||||
|
||||
### Key Types Quick Reference
|
||||
|
||||
| Type | Key Fields |
|
||||
|------|-----------|
|
||||
| `UnraidArray` | `state`, `capacity`, `boot`, `parities[]`, `parityCheckStatus`, `disks[]`, `caches[]` |
|
||||
| `ArrayDisk` | `id`, `idx`, `name`, `device`, `size`, `status`, `temp`, `numReads/Writes/Errors`, `fsSize/Free/Used`, `type`, `color`, `isSpinning` |
|
||||
| `Disk` | `id`, `device`, `type`, `name`, `vendor`, `size`, `serialNum`, `interfaceType`, `smartStatus`, `temperature`, `partitions[]`, `isSpinning` |
|
||||
| `DockerContainer` | `id`, `names[]`, `image`, `state`, `status`, `ports[]`, `autoStart`, `labels`, `mounts[]` |
|
||||
| `DockerNetwork` | `id`, `name`, `driver`, `scope`, `internal`, `attachable`, `containers`, `ipam` |
|
||||
| `VmDomain` | `id`, `name`, `state`, `uuid` (deprecated) |
|
||||
| `Notification` | `id`, `title`, `subject`, `description`, `importance`, `type`, `timestamp` |
|
||||
| `Info` | `time`, `baseboard`, `cpu`, `devices`, `display`, `memory`, `os`, `system`, `versions` |
|
||||
| `Metrics` | `cpu { percentTotal, cpus[] }`, `memory { total, used, free, percentTotal }` |
|
||||
| `Share` | `id`, `name`, `free`, `used`, `size`, `include[]`, `exclude[]`, `cache`, `comment` |
|
||||
| `ApiKey` | `id`, `key`, `name`, `description`, `roles[]`, `permissions[]`, `createdAt` |
|
||||
| `UserAccount` | `id`, `name`, `description`, `roles[]`, `permissions[]` |
|
||||
| `Server` | `id`, `name`, `status`, `guid`, `wanip`, `lanip`, `localurl`, `remoteurl`, `owner` |
|
||||
| `Service` | `id`, `name`, `online`, `uptime`, `version` |
|
||||
| `Owner` | `username`, `url`, `avatar` |
|
||||
| `Registration` | `type`, `state`, `keyFile`, `expiration`, `updateExpiration` |
|
||||
| `Vars` | 143 fields — hostname, timezone, array state, share config, registration, tuning params |
|
||||
| `UPSDevice` | `id`, `name`, `model`, `status`, `battery { chargeLevel, estimatedRuntime, health }`, `power { inputVoltage, outputVoltage, loadPercentage }` |
|
||||
| `UPSConfiguration` | `service`, `upsCable`, `upsType`, `device`, `batteryLevel`, `minutes`, `timeout`, `killUps`, + 4 more |
|
||||
| `RCloneRemote` | `name`, `type`, `parameters`, `config` |
|
||||
| `Settings` | `unified { dataSchema, uiSchema, values }`, `sso { oidcProviders[] }`, `api { version, extraOrigins }` |
|
||||
| `Flash` | `guid`, `vendor`, `product` |
|
||||
| `ParityCheck` | `date`, `duration`, `speed`, `status`, `errors`, `progress`, `correcting`, `paused`, `running` |
|
||||
| `Plugin` | `name`, `version`, `hasApiModule`, `hasCliModule` |
|
||||
|
||||
---
|
||||
|
||||
## Schema Statistics
|
||||
|
||||
| Category | Count |
|
||||
|----------|-------|
|
||||
| Query fields | 46 |
|
||||
| Mutation fields | 22 |
|
||||
| Subscription fields | 11 |
|
||||
| Object types | 94 |
|
||||
| Input types | 16 |
|
||||
| Enum types | 30 |
|
||||
| Scalar types | 10 |
|
||||
| Union types | 1 |
|
||||
| Interface types | 2 |
|
||||
| **Total types** | **156** |
|
||||
@@ -1,616 +0,0 @@
|
||||
# Competitive Analysis: Unraid Integration Projects
|
||||
|
||||
> **Date:** 2026-02-07
|
||||
> **Purpose:** Identify features and capabilities that competing Unraid integration projects offer that our `unraid-mcp` server (10 tools, 76 actions, GraphQL-based) currently lacks.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Executive Summary](#executive-summary)
|
||||
- [Project Profiles](#project-profiles)
|
||||
- [1. unraid-management-agent (Go plugin)](#1-unraid-management-agent)
|
||||
- [2. domalab/unraid-api-client (Python library)](#2-domalabunraid-api-client)
|
||||
- [3. mcp-ssh-sre / unraid-ssh-mcp (SSH-based MCP)](#3-mcp-ssh-sre--unraid-ssh-mcp)
|
||||
- [4. PSUnraid (PowerShell module)](#4-psunraid)
|
||||
- [5. ha-unraid (Home Assistant integration)](#5-ha-unraid-home-assistant-integration)
|
||||
- [6. chris-mc1/unraid_api (HA integration)](#6-chris-mc1unraid_api-ha-integration)
|
||||
- [Feature Matrix](#feature-matrix)
|
||||
- [Gap Analysis](#gap-analysis)
|
||||
- [Recommended Priorities](#recommended-priorities)
|
||||
- [Sources](#sources)
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Our `unraid-mcp` server provides 10 MCP tools (76 actions) built on the official Unraid GraphQL API. After analyzing six competing projects, we identified several significant gaps:
|
||||
|
||||
**Critical gaps (high-value features we lack):**
|
||||
1. **Array control operations** (start/stop array, parity check control, disk spin up/down)
|
||||
2. **UPS monitoring** (battery level, load, runtime, power status)
|
||||
3. **GPU metrics** (utilization, temperature, memory, power draw)
|
||||
4. **SMART disk health data** (per-disk SMART status, errors, power-on hours)
|
||||
5. **Parity check history** (dates, durations, error counts)
|
||||
6. **System reboot/shutdown** commands
|
||||
7. **Services status** (running system services)
|
||||
8. **Flash drive info** (boot device monitoring)
|
||||
9. **Plugins list** (installed plugins)
|
||||
|
||||
**Moderate gaps (nice-to-have features):**
|
||||
10. **Docker container resource metrics** (CPU %, memory usage per container)
|
||||
11. **Docker container pause/unpause** operations
|
||||
12. **ZFS pool/dataset/snapshot management**
|
||||
13. **User script execution** (User Scripts plugin integration)
|
||||
14. **Network bandwidth monitoring** (per-interface stats)
|
||||
15. **Prometheus metrics endpoint**
|
||||
16. **MQTT event publishing**
|
||||
17. **WebSocket real-time streaming** (not just subscription diagnostics)
|
||||
18. **MCP Resources** (subscribable data streams)
|
||||
19. **MCP Prompts** (guided interaction templates)
|
||||
20. **Unassigned devices** monitoring
|
||||
|
||||
**Architectural gaps:**
|
||||
21. No confirmation/safety mechanism for destructive operations
|
||||
22. No Pydantic response models (type-safe responses)
|
||||
23. No Docker network listing
|
||||
24. No container update capability
|
||||
25. No owner/cloud/remote-access info queries
|
||||
|
||||
---
|
||||
|
||||
## Project Profiles
|
||||
|
||||
### 1. unraid-management-agent
|
||||
|
||||
- **Repository:** [ruaan-deysel/unraid-management-agent](https://github.com/ruaan-deysel/unraid-management-agent)
|
||||
- **Language:** Go
|
||||
- **Architecture:** Unraid plugin with REST API + WebSocket + MCP + Prometheus + MQTT
|
||||
- **API Type:** REST (59 endpoints) + WebSocket (9 event types) + MCP (54 tools)
|
||||
- **Data Collection:** Native Go libraries (Docker SDK, libvirt, /proc, /sys) — does NOT depend on the GraphQL API
|
||||
- **Stars/Activity:** Active development, comprehensive documentation
|
||||
|
||||
**Key differentiators from our project:**
|
||||
- Runs as an Unraid plugin directly on the server (no external dependency on GraphQL API)
|
||||
- Collects data directly from /proc, /sys, Docker SDK, and libvirt
|
||||
- 59 REST endpoints vs our 10 MCP tools (76 actions)
|
||||
- 54 MCP tools with Resources and Prompts
|
||||
- Real-time WebSocket event streaming (9 event types, 5-60s intervals)
|
||||
- 41 Prometheus metrics for Grafana dashboards
|
||||
- MQTT publishing for Home Assistant/IoT integration
|
||||
- Confirmation-required destructive operations (`confirm: true` parameter)
|
||||
- Collector management (enable/disable collectors, adjust intervals)
|
||||
- System reboot and shutdown commands
|
||||
|
||||
**Unique capabilities not available via GraphQL API:**
|
||||
- GPU metrics (utilization, temperature, memory, power draw via nvidia-smi)
|
||||
- UPS metrics via NUT (Network UPS Tools) direct integration
|
||||
- Fan RPM readings from /sys
|
||||
- Motherboard temperature from /sys
|
||||
- SMART disk data (power-on hours, power cycles, read/write bytes, I/O utilization)
|
||||
- Network interface bandwidth (rx/tx bytes, real-time)
|
||||
- Docker container resource usage (CPU %, memory bytes, network I/O)
|
||||
- Unassigned devices monitoring
|
||||
- ZFS pools, datasets, snapshots, ARC stats
|
||||
- Parity check scheduling
|
||||
- Mover settings
|
||||
- Disk thresholds/settings
|
||||
- Service management
|
||||
- Plugin and update management
|
||||
- Flash drive info
|
||||
- Network access URLs (LAN, WAN, mDNS, IPv6)
|
||||
- User script execution
|
||||
- Share configuration modification (POST endpoints)
|
||||
- System settings modification
|
||||
|
||||
**MCP-specific features we lack:**
|
||||
- MCP Resources (subscribable real-time data: `unraid://system`, `unraid://array`, `unraid://containers`, `unraid://vms`, `unraid://disks`)
|
||||
- MCP Prompts (`analyze_disk_health`, `system_overview`, `troubleshoot_issue`)
|
||||
- Dual MCP transport (HTTP + SSE)
|
||||
- Confirmation-gated destructive operations
|
||||
|
||||
**REST Endpoints (59 total):**
|
||||
|
||||
| Category | Endpoints |
|
||||
|----------|-----------|
|
||||
| System & Health | `GET /health`, `GET /system`, `POST /system/reboot`, `POST /system/shutdown` |
|
||||
| Array | `GET /array`, `POST /array/start`, `POST /array/stop` |
|
||||
| Parity | `POST /parity-check/start\|stop\|pause\|resume`, `GET /parity-check/history`, `GET /parity-check/schedule` |
|
||||
| Disks | `GET /disks`, `GET /disks/{id}` |
|
||||
| Shares | `GET /shares`, `GET /shares/{name}/config`, `POST /shares/{name}/config` |
|
||||
| Docker | `GET /docker`, `GET /docker/{id}`, `POST /docker/{id}/start\|stop\|restart\|pause\|unpause` |
|
||||
| VMs | `GET /vm`, `GET /vm/{id}`, `POST /vm/{id}/start\|stop\|restart\|pause\|resume\|hibernate\|force-stop` |
|
||||
| UPS | `GET /ups` |
|
||||
| GPU | `GET /gpu` |
|
||||
| Network | `GET /network`, `GET /network/access-urls`, `GET /network/{interface}/config` |
|
||||
| Collectors | `GET /collectors/status`, `GET /collectors/{name}`, `POST /collectors/{name}/enable\|disable`, `PATCH /collectors/{name}/interval` |
|
||||
| Logs | `GET /logs`, `GET /logs/{filename}` |
|
||||
| Settings | `GET /settings/system\|docker\|vm\|disks\|disk-thresholds\|mover\|services\|network-services`, `POST /settings/system` |
|
||||
| Plugins | `GET /plugins`, `GET /updates` |
|
||||
| Flash | `GET /system/flash` |
|
||||
| Prometheus | `GET /metrics` |
|
||||
| WebSocket | `WS /ws` |
|
||||
|
||||
---
|
||||
|
||||
### 2. domalab/unraid-api-client
|
||||
|
||||
- **Repository:** [domalab/unraid-api-client](https://github.com/domalab/unraid-api-client)
|
||||
- **Language:** Python (async, aiohttp)
|
||||
- **Architecture:** Client library for the official Unraid GraphQL API
|
||||
- **API Type:** GraphQL client (same API we use)
|
||||
- **PyPI Package:** `unraid-api` (installable via pip)
|
||||
|
||||
**Key differentiators from our project:**
|
||||
- Pure client library (not an MCP server), but shows what the GraphQL API can do
|
||||
- Full Pydantic model coverage for all responses (type-safe)
|
||||
- SSL auto-discovery (handles Unraid's "No", "Yes", "Strict" SSL modes)
|
||||
- Redirect handling for myunraid.net remote access
|
||||
- Session injection for Home Assistant integration
|
||||
- Comprehensive exception hierarchy
|
||||
|
||||
**Methods we should consider adding MCP tools for:**
|
||||
|
||||
| Method | Our Coverage | Notes |
|
||||
|--------|-------------|-------|
|
||||
| `test_connection()` | Missing | Connection validation |
|
||||
| `get_version()` | Missing | API and OS version info |
|
||||
| `get_server_info()` | Partial | For device registration |
|
||||
| `get_system_metrics()` | Missing | CPU, memory, temperature, power, uptime as typed model |
|
||||
| `typed_get_array()` | Have `get_array_status()` | They have richer Pydantic model |
|
||||
| `typed_get_containers()` | Have `list_docker_containers()` | They have typed models |
|
||||
| `typed_get_vms()` | Have `list_vms()` | They have typed models |
|
||||
| `typed_get_ups_devices()` | **Missing** | UPS battery, power, runtime |
|
||||
| `typed_get_shares()` | Have `get_shares_info()` | Similar |
|
||||
| `get_notification_overview()` | Have it | Same |
|
||||
| `start/stop_container()` | Have `manage_docker_container()` | Same |
|
||||
| `pause/unpause_container()` | **Missing** | Docker pause/unpause |
|
||||
| `update_container()` | **Missing** | Container image update |
|
||||
| `remove_container()` | **Missing** | Container removal |
|
||||
| `start/stop_vm()` | Have `manage_vm()` | Same |
|
||||
| `pause/resume_vm()` | **Missing** | VM pause/resume |
|
||||
| `force_stop_vm()` | **Missing** | Force stop VM |
|
||||
| `reboot_vm()` | **Missing** | VM reboot |
|
||||
| `start/stop_array()` | **Missing** | Array start/stop control |
|
||||
| `start/pause/resume/cancel_parity_check()` | **Missing** | Full parity control |
|
||||
| `spin_up/down_disk()` | **Missing** | Disk spin control |
|
||||
| `get_parity_history()` | **Missing** | Historical parity data |
|
||||
| `typed_get_vars()` | Have `get_unraid_variables()` | Same |
|
||||
| `typed_get_registration()` | Have `get_registration_info()` | Same |
|
||||
| `typed_get_services()` | **Missing** | System services list |
|
||||
| `typed_get_flash()` | **Missing** | Flash drive info |
|
||||
| `typed_get_owner()` | **Missing** | Server owner info |
|
||||
| `typed_get_plugins()` | **Missing** | Installed plugins |
|
||||
| `typed_get_docker_networks()` | **Missing** | Docker network list |
|
||||
| `typed_get_log_files()` | Have `list_available_log_files()` | Same |
|
||||
| `typed_get_cloud()` | **Missing** | Unraid Connect cloud status |
|
||||
| `typed_get_connect()` | Have `get_connect_settings()` | Same |
|
||||
| `typed_get_remote_access()` | **Missing** | Remote access settings |
|
||||
| `get_physical_disks()` | Have `list_physical_disks()` | Same |
|
||||
| `get_array_disks()` | **Missing** | Array disk assignments |
|
||||
|
||||
---
|
||||
|
||||
### 3. mcp-ssh-sre / unraid-ssh-mcp
|
||||
|
||||
- **Repository:** [ohare93/mcp-ssh-sre](https://github.com/ohare93/mcp-ssh-sre)
|
||||
- **Language:** TypeScript/Node.js
|
||||
- **Architecture:** MCP server that connects via SSH to run predefined commands
|
||||
- **API Type:** SSH command execution (read-only by design)
|
||||
- **Tools:** 12 tool modules with 79+ actions
|
||||
|
||||
**Why SSH instead of GraphQL API:**
|
||||
The project's documentation explicitly compares SSH vs API capabilities:
|
||||
|
||||
| Feature | GraphQL API | SSH |
|
||||
|---------|------------|-----|
|
||||
| Docker container logs | Limited | Full |
|
||||
| SMART disk health data | Limited | Full (smartctl) |
|
||||
| Real-time CPU/load averages | Polling | Direct |
|
||||
| Network bandwidth monitoring | Limited | Full (iftop, nethogs) |
|
||||
| Process monitoring (ps/top) | Not available | Full |
|
||||
| Log file analysis | Basic | Full (grep, awk) |
|
||||
| Security auditing | Not available | Full |
|
||||
|
||||
**Tool modules and actions:**
|
||||
|
||||
| Module | Tool Name | Actions |
|
||||
|--------|-----------|---------|
|
||||
| Docker | `docker` | list_containers, inspect, logs, stats, port, env, top, health, logs_aggregate, list_networks, inspect_network, list_volumes, inspect_volume, network_containers |
|
||||
| System | `system` | list_files, read_file, find_files, disk_usage, system_info |
|
||||
| Monitoring | `monitoring` | ps, process_tree, top, iostat, network_connections |
|
||||
| Security | `security` | open_ports, audit_privileges, ssh_connections, cert_expiry |
|
||||
| Log Analysis | `log` | grep_all, error_aggregator, timeline, parse_docker, compare_timerange, restart_history |
|
||||
| Resources | `resource` | dangling, hogs, disk_analyzer, docker_df, zombies, io_profile |
|
||||
| Performance | `performance` | bottleneck, bandwidth, track_metric |
|
||||
| VMs | `vm` | list, info, vnc, logs |
|
||||
| Container Topology | `container_topology` | network_topology, volume_sharing, dependency_graph, port_conflicts, network_test |
|
||||
| Health Diagnostics | `health` | comprehensive, common_issues, threshold_alerts, compare_baseline, diagnostic_report, snapshot |
|
||||
| **Unraid Array** | `unraid` | array_status, smart, temps, shares, share_usage, parity_status, parity_history, sync_status, spin_status, unclean_check, mover_status, mover_log, cache_usage, split_level |
|
||||
| **Unraid Plugins** | `plugin` | list, updates, template, scripts, share_config, disk_assignments, recent_changes |
|
||||
|
||||
**Unique capabilities we lack entirely:**
|
||||
- Container log retrieval and aggregation
|
||||
- Container environment variable inspection
|
||||
- Container topology analysis (network maps, shared volumes, dependency graphs, port conflicts)
|
||||
- Process monitoring (ps, top, process trees)
|
||||
- Disk I/O monitoring (iostat)
|
||||
- Network connection analysis (ss/netstat)
|
||||
- Security auditing (open ports, privilege audit, SSH connection logs, SSL cert expiry)
|
||||
- Performance bottleneck analysis
|
||||
- Resource waste detection (dangling Docker resources, zombie processes)
|
||||
- Comprehensive health diagnostics with baseline comparison
|
||||
- Mover status and logs
|
||||
- Cache usage analysis
|
||||
- Split level configuration
|
||||
- User script discovery
|
||||
- Docker template inspection
|
||||
- Disk assignment information
|
||||
- Recent config file change detection
|
||||
|
||||
---
|
||||
|
||||
### 4. PSUnraid
|
||||
|
||||
- **Repository:** [jlabon2/PSUnraid](https://github.com/jlabon2/PSUnraid)
|
||||
- **Language:** PowerShell
|
||||
- **Architecture:** PowerShell module using GraphQL API
|
||||
- **API Type:** GraphQL (same as ours)
|
||||
- **Status:** Proof of concept, 30+ cmdlets
|
||||
|
||||
**Cmdlets and operations:**
|
||||
|
||||
| Category | Cmdlets |
|
||||
|----------|---------|
|
||||
| Connection | `Connect-Unraid`, `Disconnect-Unraid` |
|
||||
| System | `Get-UnraidServer`, `Get-UnraidMetrics`, `Get-UnraidLog`, `Start-UnraidMonitor` |
|
||||
| Docker | `Get-UnraidContainer`, `Start-UnraidContainer`, `Stop-UnraidContainer`, `Restart-UnraidContainer` |
|
||||
| VMs | `Get-UnraidVm`, `Start-UnraidVm`, `Stop-UnraidVm`, `Suspend-UnraidVm`, `Resume-UnraidVm`, `Restart-UnraidVm` |
|
||||
| Array | `Get-UnraidArray`, `Get-UnraidPhysicalDisk`, `Get-UnraidShare`, `Start-UnraidArray`, `Stop-UnraidArray` |
|
||||
| Parity | `Start-UnraidParityCheck`, `Stop-UnraidParityCheck`, `Suspend-UnraidParityCheck`, `Resume-UnraidParityCheck`, `Get-UnraidParityHistory` |
|
||||
| Notifications | `Get-UnraidNotification`, `Set-UnraidNotification`, `Remove-UnraidNotification` |
|
||||
| Other | `Get-UnraidPlugin`, `Get-UnraidUps`, `Restart-UnraidApi` |
|
||||
|
||||
**Features we lack that PSUnraid has (via same GraphQL API):**
|
||||
- Real-time monitoring dashboard (`Start-UnraidMonitor`)
|
||||
- Notification management (mark as read, delete notifications)
|
||||
- Array start/stop
|
||||
- Parity check full lifecycle (start, stop, pause, resume, history)
|
||||
- UPS monitoring
|
||||
- Plugin listing
|
||||
- API restart capability
|
||||
- VM suspend/resume/restart
|
||||
|
||||
---
|
||||
|
||||
### 5. ha-unraid (Home Assistant)
|
||||
|
||||
- **Repository:** [domalab/ha-unraid](https://github.com/domalab/ha-unraid) (ruaan-deysel fork is active)
|
||||
- **Language:** Python
|
||||
- **Architecture:** Home Assistant custom integration
|
||||
- **API Type:** Originally SSH-based (through v2025.06.11), rebuilt for GraphQL API (v2025.12.0+)
|
||||
- **Requires:** Unraid 7.2.0+, GraphQL API v4.21.0+
|
||||
|
||||
**Sensors provided:**
|
||||
|
||||
| Entity Type | Entities |
|
||||
|-------------|----------|
|
||||
| **Sensors** | CPU Usage, CPU Temperature, CPU Power, Memory Usage, Uptime, Array State, Array Usage, Parity Progress, per-Disk Usage, per-Share Usage, Flash Usage, UPS Battery, UPS Load, UPS Runtime, UPS Power, Notifications count |
|
||||
| **Binary Sensors** | Array Started, Parity Check Running, Parity Valid, per-Disk Health, UPS Connected |
|
||||
| **Switches** | Docker Container start/stop, VM start/stop |
|
||||
| **Buttons** | Array Start/Stop, Parity Check Start/Stop, Disk Spin Up/Down |
|
||||
|
||||
**Features we lack:**
|
||||
- CPU temperature and CPU power consumption monitoring
|
||||
- UPS full monitoring (battery, load, runtime, power, connected status)
|
||||
- Parity progress tracking
|
||||
- Per-disk health binary status
|
||||
- Flash device usage monitoring
|
||||
- Array start/stop buttons
|
||||
- Parity check start/stop
|
||||
- Disk spin up/down
|
||||
- Dynamic entity creation (only creates entities for available services)
|
||||
|
||||
---
|
||||
|
||||
### 6. chris-mc1/unraid_api (HA integration)
|
||||
|
||||
- **Repository:** [chris-mc1/unraid_api](https://github.com/chris-mc1/unraid_api)
|
||||
- **Language:** Python
|
||||
- **Architecture:** Lightweight Home Assistant integration using GraphQL API
|
||||
- **API Type:** GraphQL
|
||||
- **Status:** Simpler/lighter alternative to ha-unraid
|
||||
|
||||
**Entities provided:**
|
||||
- Array state sensor
|
||||
- Array used space percentage
|
||||
- RAM usage percentage
|
||||
- CPU utilization
|
||||
- Per-share free space (optional)
|
||||
- Per-disk state, temperature, spinning status, used space (optional)
|
||||
|
||||
**Notable:** This is a simpler, lighter-weight integration focused on monitoring only (no control operations).
|
||||
|
||||
---
|
||||
|
||||
## Feature Matrix
|
||||
|
||||
### Legend
|
||||
- **Y** = Supported
|
||||
- **N** = Not supported
|
||||
- **P** = Partial support
|
||||
- **--** = Not applicable
|
||||
|
||||
### Monitoring Features
|
||||
|
||||
| Feature | Our MCP (10 tools, 76 actions) | mgmt-agent (54 MCP tools) | unraid-api-client | mcp-ssh-sre (79 actions) | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| System info (hostname, uptime) | Y | Y | Y | Y | Y | Y | N |
|
||||
| CPU usage | Y | Y | Y | Y | Y | Y | Y |
|
||||
| CPU temperature | N | Y | Y | N | N | Y | N |
|
||||
| CPU power consumption | N | Y | N | N | N | Y | N |
|
||||
| Memory usage | Y | Y | Y | Y | Y | Y | Y |
|
||||
| GPU metrics | N | Y | N | N | N | N | N |
|
||||
| Fan RPM | N | Y | N | N | N | N | N |
|
||||
| Motherboard temperature | N | Y | N | N | N | N | N |
|
||||
| UPS monitoring | N | Y | Y | N | Y | Y | N |
|
||||
| Network config | Y | Y | Y | Y | N | N | N |
|
||||
| Network bandwidth | N | Y | N | Y | N | N | N |
|
||||
| Registration/license info | Y | Y | Y | N | N | N | N |
|
||||
| Connect settings | Y | Y | Y | N | N | N | N |
|
||||
| Unraid variables | Y | Y | Y | N | N | N | N |
|
||||
| System services status | N | Y | Y | N | N | N | N |
|
||||
| Flash drive info | N | Y | Y | N | N | Y | N |
|
||||
| Owner info | N | N | Y | N | N | N | N |
|
||||
| Installed plugins | N | Y | Y | Y | Y | N | N |
|
||||
| Available updates | N | Y | N | Y | N | N | N |
|
||||
|
||||
### Storage Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| Array status | Y | Y | Y | Y | Y | Y | Y |
|
||||
| Array start/stop | N | Y | Y | N | Y | Y | N |
|
||||
| Physical disk listing | Y | Y | Y | N | Y | N | N |
|
||||
| Disk details | Y | Y | Y | Y | Y | Y | Y |
|
||||
| Disk SMART data | N | Y | N | Y | N | P | N |
|
||||
| Disk spin up/down | N | Y | Y | Y | N | Y | N |
|
||||
| Disk temperatures | P | Y | Y | Y | N | Y | Y |
|
||||
| Disk I/O stats | N | Y | N | Y | N | N | N |
|
||||
| Shares info | Y | Y | Y | Y | Y | Y | Y |
|
||||
| Share configuration | N | Y | N | Y | N | N | N |
|
||||
| Parity check control | N | Y | Y | N | Y | Y | N |
|
||||
| Parity check history | N | Y | Y | Y | Y | N | N |
|
||||
| Parity progress | N | Y | Y | Y | Y | Y | N |
|
||||
| ZFS pools/datasets/snapshots | N | Y | N | N | N | N | N |
|
||||
| ZFS ARC stats | N | Y | N | N | N | N | N |
|
||||
| Unassigned devices | N | Y | N | N | N | N | N |
|
||||
| Mover status/logs | N | N | N | Y | N | N | N |
|
||||
| Cache usage | N | N | N | Y | N | N | N |
|
||||
|
||||
### Docker Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| List containers | Y | Y | Y | Y | Y | Y | N |
|
||||
| Container details | Y | Y | Y | Y | N | P | N |
|
||||
| Start/stop/restart | Y | Y | Y | N | Y | Y | N |
|
||||
| Pause/unpause | N | Y | Y | N | N | N | N |
|
||||
| Container resource usage | N | Y | Y | Y | N | N | N |
|
||||
| Container logs | N | N | N | Y | N | N | N |
|
||||
| Container env vars | N | N | N | Y | N | N | N |
|
||||
| Container network topology | N | N | N | Y | N | N | N |
|
||||
| Container port inspection | N | N | N | Y | N | N | N |
|
||||
| Docker networks | N | Y | Y | Y | N | N | N |
|
||||
| Docker volumes | N | N | N | Y | N | N | N |
|
||||
| Container update | N | N | Y | N | N | N | N |
|
||||
| Container removal | N | N | Y | N | N | N | N |
|
||||
| Docker settings | N | Y | N | N | N | N | N |
|
||||
|
||||
### VM Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| List VMs | Y | Y | Y | Y | Y | Y | N |
|
||||
| VM details | Y | Y | Y | Y | N | P | N |
|
||||
| Start/stop | Y | Y | Y | N | Y | Y | N |
|
||||
| Restart | Y | Y | N | N | Y | N | N |
|
||||
| Pause/resume | N | Y | Y | N | Y | N | N |
|
||||
| Hibernate | N | Y | N | N | N | N | N |
|
||||
| Force stop | N | Y | Y | N | Y | N | N |
|
||||
| Reboot VM | N | N | Y | N | N | N | N |
|
||||
| VNC info | N | N | N | Y | N | N | N |
|
||||
| VM libvirt logs | N | N | N | Y | N | N | N |
|
||||
| VM settings | N | Y | N | N | N | N | N |
|
||||
|
||||
### Cloud Storage (RClone) Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| List remotes | Y | N | N | N | N | N | N |
|
||||
| Get config form | Y | N | N | N | N | N | N |
|
||||
| Create remote | Y | N | N | N | N | N | N |
|
||||
| Delete remote | Y | N | N | N | N | N | N |
|
||||
|
||||
> **Note:** RClone management is unique to our project among these competitors.
|
||||
|
||||
### Notification Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| Notification overview | Y | Y | Y | N | N | Y | N |
|
||||
| List notifications | Y | Y | Y | Y | Y | N | N |
|
||||
| Mark as read | N | N | N | N | Y | N | N |
|
||||
| Delete notifications | N | N | N | N | Y | N | N |
|
||||
|
||||
### Logs & Diagnostics
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| List log files | Y | Y | Y | N | N | N | N |
|
||||
| Get log contents | Y | Y | Y | Y | Y | N | N |
|
||||
| Log search/grep | N | N | N | Y | N | N | N |
|
||||
| Error aggregation | N | N | N | Y | N | N | N |
|
||||
| Syslog access | N | Y | N | Y | Y | N | N |
|
||||
| Docker daemon log | N | Y | N | Y | N | N | N |
|
||||
| Health check | Y | Y | N | Y | N | N | N |
|
||||
| Subscription diagnostics | Y | N | N | N | N | N | N |
|
||||
|
||||
### Integration & Protocol Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | unraid-api-client | mcp-ssh-sre | PSUnraid | ha-unraid | chris-mc1 |
|
||||
|---------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|
||||
| MCP tools | Y (10 tools, 76 actions) | Y (54) | N | Y (79 actions) | N | N | N |
|
||||
| MCP Resources | N | Y (5) | N | N | N | N | N |
|
||||
| MCP Prompts | N | Y (3) | N | N | N | N | N |
|
||||
| REST API | N | Y (59) | N | N | N | N | N |
|
||||
| WebSocket streaming | N | Y (9 events) | N | N | N | N | N |
|
||||
| Prometheus metrics | N | Y (41) | N | N | N | N | N |
|
||||
| MQTT publishing | N | Y | N | N | N | N | N |
|
||||
| SSE transport | Y | Y | N | Y | N | N | N |
|
||||
| Stdio transport | Y | N | N | Y | N | N | N |
|
||||
| Streamable HTTP | Y | Y | N | Y | N | N | N |
|
||||
| Pydantic models | N | N | Y | N | N | N | N |
|
||||
| Safety confirmations | N | Y | N | N | N | N | N |
|
||||
|
||||
### Security & Operational Features
|
||||
|
||||
| Feature | Our MCP | mgmt-agent | mcp-ssh-sre | PSUnraid |
|
||||
|---------|:---:|:---:|:---:|:---:|
|
||||
| Open port scanning | N | N | Y | N |
|
||||
| SSH login monitoring | N | N | Y | N |
|
||||
| Container privilege audit | N | N | Y | N |
|
||||
| SSL certificate expiry | N | N | Y | N |
|
||||
| Process monitoring | N | N | Y | N |
|
||||
| Zombie process detection | N | N | Y | N |
|
||||
| Performance bottleneck analysis | N | N | Y | N |
|
||||
| System reboot | N | Y | N | N |
|
||||
| System shutdown | N | Y | N | N |
|
||||
| User script execution | N | Y | Y | N |
|
||||
|
||||
---
|
||||
|
||||
## Gap Analysis
|
||||
|
||||
### Priority 1: High-Value Features Available via GraphQL API
|
||||
|
||||
These features are available through the same GraphQL API we already use and should be straightforward to implement:
|
||||
|
||||
1. **Array start/stop control** -- Both `domalab/unraid-api-client` and `PSUnraid` implement this via GraphQL mutations. This is a fundamental control operation that every competitor supports.
|
||||
|
||||
2. **Parity check lifecycle** (start, stop, pause, resume, history) -- Available via GraphQL mutations. Critical for array management.
|
||||
|
||||
3. **Disk spin up/down** -- Available via GraphQL mutations. Important for power management and noise control.
|
||||
|
||||
4. **UPS monitoring** -- Available via GraphQL query. Present in `unraid-api-client`, `PSUnraid`, and `ha-unraid`. Data includes battery level, load, runtime, power state.
|
||||
|
||||
5. **System services list** -- Available via GraphQL query (`services`). Shows Docker service, VM manager status, etc.
|
||||
|
||||
6. **Flash drive info** -- Available via GraphQL query (`flash`). Boot device monitoring.
|
||||
|
||||
7. **Installed plugins list** -- Available via GraphQL query (`plugins`). Useful for understanding server configuration.
|
||||
|
||||
8. **Docker networks** -- Available via GraphQL query. Listed in `unraid-api-client`.
|
||||
|
||||
9. **Parity history** -- Available via GraphQL query. Historical parity check data.
|
||||
|
||||
10. **VM pause/resume and force stop** -- Available via GraphQL mutations, completing our VM control capabilities.
|
||||
|
||||
11. **Docker pause/unpause** -- Available via GraphQL mutations, completing our Docker control capabilities.
|
||||
|
||||
12. **Cloud/remote access status** -- Available via GraphQL queries. Shows Unraid Connect status, remote access configuration.
|
||||
|
||||
13. **Notification management** -- Mark as read, delete. `PSUnraid` implements this via GraphQL.
|
||||
|
||||
14. **API/OS version info** -- Simple query that helps with compatibility checks.
|
||||
|
||||
### Priority 2: High-Value Features Requiring Non-GraphQL Data Sources
|
||||
|
||||
These would require SSH access or other system-level access that our GraphQL-only architecture cannot provide:
|
||||
|
||||
1. **Container logs** -- Not available via GraphQL. SSH-based solutions (mcp-ssh-sre) can retrieve full container logs via `docker logs`.
|
||||
|
||||
2. **SMART disk data** -- Limited via GraphQL. Full SMART data (power-on hours, error counts, reallocated sectors) requires `smartctl` access.
|
||||
|
||||
3. **GPU metrics** -- Not available via GraphQL. Requires nvidia-smi or similar.
|
||||
|
||||
4. **Process monitoring** -- Not available via GraphQL. Requires `ps`/`top` access.
|
||||
|
||||
5. **Network bandwidth** -- Not in GraphQL. Requires direct system access.
|
||||
|
||||
6. **Container resource usage** (CPU%, memory) -- Not available through the current GraphQL API at a per-container level in real-time.
|
||||
|
||||
7. **Log search/grep** -- While we can get log contents, we cannot search across logs.
|
||||
|
||||
8. **Security auditing** -- Not available via GraphQL.
|
||||
|
||||
### Priority 3: Architectural Improvements
|
||||
|
||||
1. **MCP Resources** -- Add subscribable data streams (system, array, containers, VMs, disks) for real-time AI agent monitoring.
|
||||
|
||||
2. **MCP Prompts** -- Add guided interaction templates (disk health analysis, system overview, troubleshooting).
|
||||
|
||||
3. **Confirmation for destructive operations** -- Add a `confirm` parameter for array stop, system reboot, container removal, etc.
|
||||
|
||||
4. **Pydantic response models** -- Type-safe response parsing like `domalab/unraid-api-client`.
|
||||
|
||||
5. **Connection validation tool** -- Simple tool to verify API connectivity and version compatibility.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Priorities
|
||||
|
||||
### Phase 1: Low-Hanging Fruit (GraphQL mutations/queries we already have access to)
|
||||
|
||||
**Estimated effort: Small -- these are straightforward GraphQL queries/mutations**
|
||||
|
||||
| New Tool | Priority | Notes |
|
||||
|----------|----------|-------|
|
||||
| `start_array()` / `stop_array()` | Critical | Every competitor has this |
|
||||
| `start_parity_check()` / `stop_parity_check()` | Critical | Full parity lifecycle |
|
||||
| `pause_parity_check()` / `resume_parity_check()` | Critical | Full parity lifecycle |
|
||||
| `get_parity_history()` | High | Historical data |
|
||||
| `spin_up_disk()` / `spin_down_disk()` | High | Disk power management |
|
||||
| `get_ups_status()` | High | UPS monitoring |
|
||||
| `get_services_status()` | Medium | System services |
|
||||
| `get_flash_info()` | Medium | Flash drive info |
|
||||
| `get_plugins()` | Medium | Plugin management |
|
||||
| `get_docker_networks()` | Medium | Docker networking |
|
||||
| `pause_docker_container()` / `unpause_docker_container()` | Medium | Docker control |
|
||||
| `pause_vm()` / `resume_vm()` / `force_stop_vm()` | Medium | VM control |
|
||||
| `get_cloud_status()` / `get_remote_access()` | Low | Connect info |
|
||||
| `get_version()` | Low | API version |
|
||||
| `manage_notifications()` | Low | Mark read/delete |
|
||||
|
||||
### Phase 2: MCP Protocol Enhancements
|
||||
|
||||
| Enhancement | Priority | Notes |
|
||||
|-------------|----------|-------|
|
||||
| MCP Resources (5 streams) | High | Real-time data for AI agents |
|
||||
| MCP Prompts (3 templates) | Medium | Guided interactions |
|
||||
| Confirmation parameter | High | Safety for destructive ops |
|
||||
| Connection validation tool | Medium | Health/compatibility check |
|
||||
|
||||
### Phase 3: Advanced Features (may require SSH)
|
||||
|
||||
| Feature | Priority | Notes |
|
||||
|---------|----------|-------|
|
||||
| Container log retrieval | High | Most-requested SSH-only feature |
|
||||
| SMART disk health data | High | Disk failure prediction |
|
||||
| GPU monitoring | Medium | For GPU passthrough users |
|
||||
| Performance/resource monitoring | Medium | Bottleneck analysis |
|
||||
| Security auditing | Low | Port scan, login audit |
|
||||
|
||||
---
|
||||
|
||||
## Sources
|
||||
|
||||
- [ruaan-deysel/unraid-management-agent](https://github.com/ruaan-deysel/unraid-management-agent) -- Go-based Unraid plugin with REST API, WebSocket, MCP, Prometheus, and MQTT
|
||||
- [domalab/unraid-api-client](https://github.com/domalab/unraid-api-client) -- Async Python client for Unraid GraphQL API (PyPI: `unraid-api`)
|
||||
- [ohare93/mcp-ssh-sre](https://github.com/ohare93/mcp-ssh-sre) -- SSH-based MCP server for read-only server monitoring
|
||||
- [jlabon2/PSUnraid](https://github.com/jlabon2/PSUnraid) -- PowerShell module for Unraid 7.x management via GraphQL API
|
||||
- [domalab/ha-unraid](https://github.com/domalab/ha-unraid) (ruaan-deysel fork) -- Home Assistant integration via GraphQL API
|
||||
- [chris-mc1/unraid_api](https://github.com/chris-mc1/unraid_api) -- Lightweight Home Assistant integration for Unraid
|
||||
- [nickbeddows-ctrl/unraid-ssh-mcp](https://github.com/nickbeddows-ctrl/unraid-ssh-mcp) -- Guardrailed MCP server for Unraid management via SSH
|
||||
- [MCP SSH Unraid on LobeHub](https://lobehub.com/mcp/ohare93-unraid-ssh-mcp)
|
||||
- [MCP SSH SRE on Glama](https://glama.ai/mcp/servers/@ohare93/mcp-ssh-sre)
|
||||
- [Unraid Integration for Home Assistant (domalab docs)](https://domalab.github.io/ha-unraid/)
|
||||
- [Home Assistant Unraid Integration forum thread](https://community.home-assistant.io/t/unraid-integration/785003)
|
||||
@@ -1,845 +0,0 @@
|
||||
# Unraid API Feature Gap Analysis
|
||||
|
||||
> **Date:** 2026-02-07
|
||||
> **Purpose:** Comprehensive inventory of every API capability that could become an MCP tool, cross-referenced against our current 10 tools (76 actions) to identify gaps.
|
||||
> **Sources:** 7 research documents (3,800+ lines), Unraid API source code analysis, community project reviews, official documentation crawl.
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [All GraphQL Queries Available](#a-all-graphql-queries-available)
|
||||
2. [All GraphQL Mutations Available](#b-all-graphql-mutations-available)
|
||||
3. [All GraphQL Subscriptions Available](#c-all-graphql-subscriptions-available)
|
||||
4. [All Custom Scalars and Types](#d-all-custom-scalars-and-types)
|
||||
5. [All Enums](#e-all-enums)
|
||||
6. [API Capabilities NOT in Current MCP Server](#f-api-capabilities-not-currently-in-the-mcp-server)
|
||||
7. [Community Project Capabilities](#g-community-project-capabilities)
|
||||
8. [Known API Bugs and Limitations](#h-known-api-bugs-and-limitations)
|
||||
|
||||
---
|
||||
|
||||
## A. All GraphQL Queries Available
|
||||
|
||||
Every query type identified across all research documents, with their fields and sub-fields.
|
||||
|
||||
### A.1 System & Server Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `info` | `time`, `baseboard { manufacturer, model, version, serial }`, `cpu { manufacturer, brand, vendor, family, model, stepping, revision, voltage, speed, speedmin, speedmax, threads, cores, processors, socket, cache, flags }`, `devices`, `display`, `machineId`, `memory { max, total, free, used, active, available, buffcache, swaptotal, swapused, swapfree, layout[] }`, `os { platform, distro, release, codename, kernel, arch, hostname, codepage, logofile, serial, build, uptime }`, `system { manufacturer, model, version, serial, uuid }`, `versions { kernel, docker, unraid, node }`, `apps { installed, started }` | **YES** - `get_system_info()` |
|
||||
| `vars` | `id`, `version`, `name`, `timeZone`, `comment`, `security`, `workgroup`, `domain`, `useNtp`, `ntpServer1-4`, `useSsl`, `port`, `portssl`, `useTelnet`, `useSsh`, `portssh`, `startPage`, `startArray`, `spindownDelay`, `defaultFormat`, `defaultFsType`, `shutdownTimeout`, `shareDisk`, `shareUser`, `shareSmbEnabled`, `shareNfsEnabled`, `shareAfpEnabled`, `shareCacheEnabled`, `shareMoverSchedule`, `shareMoverLogging`, `safeMode`, `configValid`, `configError`, `deviceCount`, `flashGuid`, `flashProduct`, `flashVendor`, `regState`, `regTo`, `mdState`, `mdNumDisks`, `mdNumDisabled`, `mdNumInvalid`, `mdNumMissing`, `mdResync`, `mdResyncAction`, `fsState`, `fsProgress`, `fsCopyPrcnt`, `shareCount`, `shareSmbCount`, `shareNfsCount`, `csrfToken`, `maxArraysz`, `maxCachesz` | **YES** - `get_unraid_variables()` |
|
||||
| `online` | `Boolean` | **NO** |
|
||||
| `owner` | Server owner information | **NO** |
|
||||
| `server` | Server details | **NO** |
|
||||
| `servers` | `[Server!]!` - List of all servers (Connect-managed) | **NO** |
|
||||
| `me` | `id`, `name`, `description`, `roles`, `permissions` (current authenticated user) | **NO** |
|
||||
| `user(id)` | `id`, `name`, `description`, `roles`, `password`, `permissions` | **NO** |
|
||||
| `users(input)` | `[User!]!` - List of users | **NO** |
|
||||
| `config` | `Config!` - System configuration | **NO** |
|
||||
| `display` | Display settings | **NO** |
|
||||
| `services` | `[Service!]!` - Running services list | **NO** |
|
||||
| `cloud` | `error`, `apiKey`, `relay`, `minigraphql`, `cloud`, `allowedOrigins` | **NO** |
|
||||
| `flash` | Flash drive information | **NO** |
|
||||
|
||||
### A.2 Network Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `network` | `id`, `iface`, `ifaceName`, `ipv4`, `ipv6`, `mac`, `internal`, `operstate`, `type`, `duplex`, `mtu`, `speed`, `carrierChanges`, `accessUrls { type, name, ipv4, ipv6 }` | **YES** - `get_network_config()` |
|
||||
|
||||
### A.3 Storage & Array Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `array` | `id`, `state`, `previousState`, `pendingState`, `capacity { kilobytes { free, used, total }, disks { free, used, total } }`, `boot { id, idx, name, device, size, fsSize, fsFree, fsUsed, status, rotational, temp, numReads, numWrites, numErrors, type, exportable, warning, critical, fsType, comment, format, transport, color, isSpinning }`, `parities[...]`, `disks[...]`, `caches[...]`, `parityCheckStatus` | **PARTIAL** - `get_array_status()` (missing `previousState`, `pendingState`, `parityCheckStatus`, disk fields like `color`, `isSpinning`, `transport`, `format`) |
|
||||
| `parityHistory` | `[ParityCheck]` - Historical parity check records | **NO** |
|
||||
| `disks` | `[Disk]!` - All physical disks with `device`, `type`, `name`, `vendor`, `size`, `bytesPerSector`, `totalCylinders`, `totalHeads`, `totalSectors`, `totalTracks`, `tracksPerCylinder`, `sectorsPerTrack`, `firmwareRevision`, `serialNum`, `interfaceType`, `smartStatus`, `temperature`, `partitions[]` | **YES** - `list_physical_disks()` |
|
||||
| `disk(id)` | Single disk by PrefixedID | **YES** - `get_disk_details()` |
|
||||
| `shares` | `name`, `free`, `used`, `size`, `include[]`, `exclude[]`, `cache`, `nameOrig`, `comment`, `allocator`, `splitLevel`, `floor`, `cow`, `color`, `luksStatus` | **PARTIAL** - `get_shares_info()` (may not query all fields like `allocator`, `splitLevel`, `floor`, `cow`, `luksStatus`) |
|
||||
| `unassignedDevices` | `[UnassignedDevice]` - Devices not assigned to array/pool | **NO** |
|
||||
|
||||
### A.4 Docker Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `docker` | `id`, `containers[]`, `networks[]` | **YES** - `list_docker_containers()` |
|
||||
| `dockerContainers(all)` | `[DockerContainer!]!` - All containers with full details including `id`, `names`, `image`, `imageId`, `command`, `created`, `ports[]`, `lanIpPorts[]`, `sizeRootFs`, `sizeRw`, `sizeLog`, `labels`, `state`, `status`, `hostConfig`, `networkSettings`, `mounts`, `autoStart`, `autoStartOrder`, `autoStartWait`, `templatePath`, `projectUrl`, `registryUrl`, `supportUrl`, `iconUrl`, `webUiUrl`, `shell`, `templatePorts`, `isOrphaned` | **YES** - `list_docker_containers()` / `get_docker_container_details()` |
|
||||
| `container(id)` (via Docker resolver) | Single container by PrefixedID | **YES** - `get_docker_container_details()` |
|
||||
| `docker.logs(id, since, tail)` | Container log output with filtering | **NO** |
|
||||
| `docker.networks` / `dockerNetworks(all)` | `[DockerNetwork]` - name, id, created, scope, driver, enableIPv6, ipam, internal, attachable, ingress, configFrom, configOnly, containers, options, labels | **NO** |
|
||||
| `dockerNetwork(id)` | Single network by ID | **NO** |
|
||||
| `docker.portConflicts` | Port conflict detection | **NO** |
|
||||
| `docker.organizer` | Container organization/folder structure | **NO** |
|
||||
| `docker.containerUpdateStatuses` | Check for available container image updates (`UpdateStatus`: UP_TO_DATE, UPDATE_AVAILABLE, REBUILD_READY, UNKNOWN) | **NO** |
|
||||
|
||||
### A.5 VM Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `vms` | `id`, `domain[{ uuid/id, name, state }]` | **YES** - `list_vms()` / `get_vm_details()` |
|
||||
|
||||
### A.6 Notification Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `notifications` | `id`, `overview { unread { info, warning, alert, total }, archive { info, warning, alert, total } }`, `list(filter) [{ id, title, subject, description, importance, link, type, timestamp, formattedTimestamp }]` | **YES** - `get_notifications_overview()` / `list_notifications()` |
|
||||
| `notifications.warningsAndAlerts` | Deduplicated unread warnings and alerts | **NO** |
|
||||
|
||||
### A.7 Registration & Connect Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `registration` | `id`, `type`, `state`, `expiration`, `updateExpiration`, `keyFile { location, contents }` | **YES** - `get_registration_info()` |
|
||||
| `connect` | `id`, `dynamicRemoteAccess { ... }` | **YES** - `get_connect_settings()` |
|
||||
| `remoteAccess` | `accessType`, `forwardType`, `port` | **NO** |
|
||||
| `extraAllowedOrigins` | `[String!]!` | **NO** |
|
||||
|
||||
### A.8 RClone Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `rclone.remotes` | `name`, `type`, `parameters`, `config` | **YES** - `list_rclone_remotes()` |
|
||||
| `rclone.configForm(formOptions)` | `id`, `dataSchema`, `uiSchema` | **YES** - `get_rclone_config_form()` |
|
||||
|
||||
### A.9 Logs Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `logFiles` | List available log files | **YES** - `list_available_log_files()` |
|
||||
| `logFile(path, lines, startLine)` | Specific log file content with pagination | **YES** - `get_logs()` |
|
||||
|
||||
### A.10 Settings Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `settings` | `unified { values }`, SSO config | **NO** |
|
||||
|
||||
### A.11 API Key Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `apiKeys` | `[ApiKey!]!` - List all API keys with `id`, `name`, `description`, `roles[]`, `createdAt`, `permissions[]` | **NO** |
|
||||
| `apiKey(id)` | Single API key by ID | **NO** |
|
||||
|
||||
### A.12 UPS Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `upsDevices` | List UPS devices with status | **NO** |
|
||||
| `upsDeviceById(id)` | Specific UPS device | **NO** |
|
||||
| `upsConfiguration` | UPS configuration settings | **NO** |
|
||||
|
||||
### A.13 Metrics Queries
|
||||
|
||||
| Query | Fields | Current MCP Coverage |
|
||||
|-------|--------|---------------------|
|
||||
| `metrics` | System performance metrics (CPU, memory utilization) | **NO** |
|
||||
|
||||
---
|
||||
|
||||
## B. All GraphQL Mutations Available
|
||||
|
||||
Every mutation identified across all research documents with their parameters and return types.
|
||||
|
||||
### B.1 Array Management Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `startArray` | none | `Array` | **NO** |
|
||||
| `stopArray` | none | `Array` | **NO** |
|
||||
| `addDiskToArray(input)` | `arrayDiskInput` | `Array` | **NO** |
|
||||
| `removeDiskFromArray(input)` | `arrayDiskInput` | `Array` | **NO** |
|
||||
| `mountArrayDisk(id)` | `ID!` | `Disk` | **NO** |
|
||||
| `unmountArrayDisk(id)` | `ID!` | `Disk` | **NO** |
|
||||
| `clearArrayDiskStatistics(id)` | `ID!` | `JSON` | **NO** |
|
||||
|
||||
### B.2 Parity Check Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `startParityCheck(correct)` | `correct: Boolean` | `JSON` | **NO** |
|
||||
| `pauseParityCheck` | none | `JSON` | **NO** |
|
||||
| `resumeParityCheck` | none | `JSON` | **NO** |
|
||||
| `cancelParityCheck` | none | `JSON` | **NO** |
|
||||
|
||||
### B.3 Docker Container Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `docker.start(id)` | `PrefixedID!` | `DockerContainer` | **YES** - `manage_docker_container(action="start")` |
|
||||
| `docker.stop(id)` | `PrefixedID!` | `DockerContainer` | **YES** - `manage_docker_container(action="stop")` |
|
||||
| `docker.pause(id)` | `PrefixedID!` | `DockerContainer` | **NO** |
|
||||
| `docker.unpause(id)` | `PrefixedID!` | `DockerContainer` | **NO** |
|
||||
| `docker.removeContainer(id, withImage?)` | `PrefixedID!`, `Boolean` | `DockerContainer` | **NO** |
|
||||
| `docker.updateContainer(id)` | `PrefixedID!` | `DockerContainer` | **NO** |
|
||||
| `docker.updateContainers(ids)` | `[PrefixedID!]!` | `[DockerContainer]` | **NO** |
|
||||
| `docker.updateAllContainers` | none | `[DockerContainer]` | **NO** |
|
||||
| `docker.updateAutostartConfiguration` | auto-start config | varies | **NO** |
|
||||
|
||||
### B.4 Docker Organizer Mutations (Feature-Flagged)
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `docker.createDockerFolder` | folder config | varies | **NO** |
|
||||
| `docker.setDockerFolderChildren` | folder ID, children | varies | **NO** |
|
||||
| `docker.deleteDockerEntries` | entry IDs | varies | **NO** |
|
||||
| `docker.moveDockerEntriesToFolder` | entries, folder | varies | **NO** |
|
||||
| `docker.moveDockerItemsToPosition` | items, position | varies | **NO** |
|
||||
| `docker.renameDockerFolder` | folder ID, name | varies | **NO** |
|
||||
| `docker.createDockerFolderWithItems` | folder config, items | varies | **NO** |
|
||||
|
||||
### B.5 Docker Template Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `docker.syncDockerTemplatePaths` | none | varies | **NO** |
|
||||
| `docker.resetDockerTemplateMappings` | none | varies | **NO** |
|
||||
|
||||
### B.6 VM Management Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `vm.start(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="start")` |
|
||||
| `vm.stop(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="stop")` |
|
||||
| `vm.pause(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="pause")` |
|
||||
| `vm.resume(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="resume")` |
|
||||
| `vm.forceStop(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="forceStop")` |
|
||||
| `vm.reboot(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="reboot")` |
|
||||
| `vm.reset(id)` | `PrefixedID!` | `Boolean` | **YES** - `manage_vm(action="reset")` |
|
||||
|
||||
### B.7 Notification Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `createNotification(input)` | `NotificationData!` | `Notification!` | **NO** |
|
||||
| `deleteNotification(id, type)` | `String!`, `NotificationType!` | `NotificationOverview!` | **NO** |
|
||||
| `deleteArchivedNotifications` | none | `NotificationOverview!` | **NO** |
|
||||
| `archiveNotification(id)` | `String!` | `Notification!` | **NO** |
|
||||
| `unreadNotification(id)` | `String!` | `Notification!` | **NO** |
|
||||
| `archiveNotifications(ids)` | `[String!]` | `NotificationOverview!` | **NO** |
|
||||
| `unarchiveNotifications(ids)` | `[String!]` | `NotificationOverview!` | **NO** |
|
||||
| `archiveAll(importance?)` | `Importance` (optional) | `NotificationOverview!` | **NO** |
|
||||
| `unarchiveAll(importance?)` | `Importance` (optional) | `NotificationOverview!` | **NO** |
|
||||
| `recalculateOverview` | none | `NotificationOverview!` | **NO** |
|
||||
| `notifyIfUnique(input)` | `NotificationData!` | `Notification!` | **NO** |
|
||||
|
||||
### B.8 RClone Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `createRCloneRemote(input)` | name, type, config | `RCloneRemote` | **YES** - `create_rclone_remote()` |
|
||||
| `deleteRCloneRemote(input)` | name | `Boolean` | **YES** - `delete_rclone_remote()` |
|
||||
|
||||
### B.9 Server Power Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `shutdown` | none | `String` | **NO** |
|
||||
| `reboot` | none | `String` | **NO** |
|
||||
|
||||
### B.10 Authentication & User Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `login(username, password)` | `String!`, `String!` | `String` | **NO** |
|
||||
| `createApiKey(input)` | `CreateApiKeyInput!` | `ApiKeyWithSecret!` | **NO** |
|
||||
| `addPermission(input)` | `AddPermissionInput!` | `Boolean!` | **NO** |
|
||||
| `addRoleForUser(input)` | `AddRoleForUserInput!` | `Boolean!` | **NO** |
|
||||
| `addRoleForApiKey(input)` | `AddRoleForApiKeyInput!` | `Boolean!` | **NO** |
|
||||
| `removeRoleFromApiKey(input)` | `RemoveRoleFromApiKeyInput!` | `Boolean!` | **NO** |
|
||||
| `deleteApiKeys(input)` | API key IDs | `Boolean` | **NO** |
|
||||
| `updateApiKey(input)` | API key update data | `Boolean` | **NO** |
|
||||
| `addUser(input)` | `addUserInput!` | `User` | **NO** |
|
||||
| `deleteUser(input)` | `deleteUserInput!` | `User` | **NO** |
|
||||
|
||||
### B.11 Connect/Remote Access Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `connectSignIn(input)` | `ConnectSignInInput!` | `Boolean!` | **NO** |
|
||||
| `connectSignOut` | none | `Boolean!` | **NO** |
|
||||
| `enableDynamicRemoteAccess(input)` | `EnableDynamicRemoteAccessInput!` | `Boolean!` | **NO** |
|
||||
| `setAdditionalAllowedOrigins(input)` | `AllowedOriginInput!` | `[String!]!` | **NO** |
|
||||
| `setupRemoteAccess(input)` | `SetupRemoteAccessInput!` | `Boolean!` | **NO** |
|
||||
|
||||
### B.12 UPS Mutations
|
||||
|
||||
| Mutation | Parameters | Returns | Current MCP Coverage |
|
||||
|----------|------------|---------|---------------------|
|
||||
| `configureUps(config)` | UPS configuration | varies | **NO** |
|
||||
|
||||
---
|
||||
|
||||
## C. All GraphQL Subscriptions Available
|
||||
|
||||
Every subscription channel identified with update intervals and event triggers.
|
||||
|
||||
### C.1 PubSub Channel Definitions (from source code)
|
||||
|
||||
```
|
||||
GRAPHQL_PUBSUB_CHANNEL {
|
||||
ARRAY // Array state changes
|
||||
CPU_UTILIZATION // 1-second CPU utilization data
|
||||
CPU_TELEMETRY // 5-second CPU power & temperature
|
||||
DASHBOARD // Dashboard aggregate updates
|
||||
DISPLAY // Display settings changes
|
||||
INFO // System information changes
|
||||
MEMORY_UTILIZATION // 2-second memory utilization
|
||||
NOTIFICATION // Notification state changes
|
||||
NOTIFICATION_ADDED // New notification created
|
||||
NOTIFICATION_OVERVIEW // Notification count updates
|
||||
NOTIFICATION_WARNINGS_AND_ALERTS // Warning/alert changes
|
||||
OWNER // Owner information changes
|
||||
SERVERS // Server list changes
|
||||
VMS // VM state changes
|
||||
DOCKER_STATS // Container performance stats
|
||||
LOG_FILE // Real-time log file updates (dynamic path)
|
||||
PARITY // Parity check progress
|
||||
}
|
||||
```
|
||||
|
||||
### C.2 GraphQL Subscription Types (from schema)
|
||||
|
||||
| Subscription | Channel | Interval | Description | Current MCP Coverage |
|
||||
|-------------|---------|----------|-------------|---------------------|
|
||||
| `array` | ARRAY | Event-based | Real-time array state changes | **NO** (diag only) |
|
||||
| `parityHistory` | PARITY | Event-based | Parity check progress updates | **NO** |
|
||||
| `ping` | - | - | Connection keepalive | **NO** |
|
||||
| `info` | INFO | Event-based | System info changes | **NO** (diag only) |
|
||||
| `online` | - | Event-based | Online status changes | **NO** |
|
||||
| `config` | - | Event-based | Configuration changes | **NO** |
|
||||
| `display` | DISPLAY | Event-based | Display settings changes | **NO** |
|
||||
| `dockerContainer(id)` | DOCKER_STATS | Polling | Single container stats (CPU%, mem, net I/O, block I/O) | **NO** |
|
||||
| `dockerContainers` | DOCKER_STATS | Polling | All container state changes | **NO** |
|
||||
| `dockerNetwork(id)` | - | Event-based | Single network changes | **NO** |
|
||||
| `dockerNetworks` | - | Event-based | All network changes | **NO** |
|
||||
| `flash` | - | Event-based | Flash drive changes | **NO** |
|
||||
| `notificationAdded` | NOTIFICATION_ADDED | Event-based | New notification created | **NO** |
|
||||
| `notificationsOverview` | NOTIFICATION_OVERVIEW | Event-based | Notification count updates | **NO** |
|
||||
| `notificationsWarningsAndAlerts` | NOTIFICATION_WARNINGS_AND_ALERTS | Event-based | Warning/alert changes | **NO** |
|
||||
| `owner` | OWNER | Event-based | Owner info changes | **NO** |
|
||||
| `registration` | - | Event-based | Registration changes | **NO** |
|
||||
| `server` | - | Event-based | Server status changes | **NO** |
|
||||
| `service(name)` | - | Event-based | Specific service changes | **NO** |
|
||||
| `share(id)` | - | Event-based | Single share changes | **NO** |
|
||||
| `shares` | - | Event-based | All shares changes | **NO** |
|
||||
| `unassignedDevices` | - | Event-based | Unassigned device changes | **NO** |
|
||||
| `me` | - | Event-based | Current user changes | **NO** |
|
||||
| `user(id)` | - | Event-based | Specific user changes | **NO** |
|
||||
| `users` | - | Event-based | User list changes | **NO** |
|
||||
| `vars` | - | Event-based | Server variable changes | **NO** |
|
||||
| `vms` | VMS | Event-based | VM state changes | **NO** |
|
||||
| `systemMetricsCpu` | CPU_UTILIZATION | 1 second | Real-time CPU utilization | **NO** |
|
||||
| `systemMetricsCpuTelemetry` | CPU_TELEMETRY | 5 seconds | CPU power & temperature | **NO** |
|
||||
| `systemMetricsMemory` | MEMORY_UTILIZATION | 2 seconds | Memory utilization | **NO** |
|
||||
| `logFileSubscription(path)` | LOG_FILE (dynamic) | Event-based | Real-time log tailing | **NO** |
|
||||
| `upsUpdates` | - | Event-based | UPS status changes | **NO** |
|
||||
|
||||
**Note:** The current MCP server has `test_subscription_query()` and `diagnose_subscriptions()` as diagnostic tools but does NOT expose any production subscription-based tools that stream real-time data.
|
||||
|
||||
---
|
||||
|
||||
## D. All Custom Scalars and Types
|
||||
|
||||
### D.1 Custom Scalar Types
|
||||
|
||||
| Scalar | Description | Serialization | Usage |
|
||||
|--------|-------------|---------------|-------|
|
||||
| `PrefixedID` | Server-prefixed identifiers | String (format: `TypePrefix:uuid`) | Container IDs, VM IDs, disk IDs, share IDs |
|
||||
| `Long` | 52-bit integers (exceeds GraphQL Int 32-bit limit) | String in JSON | Disk sizes, memory values, operation counters |
|
||||
| `BigInt` | Large integer values | String in JSON | Same as Long (used in newer schema versions) |
|
||||
| `DateTime` | ISO 8601 date-time string (RFC 3339) | String | Timestamps, uptime, creation dates |
|
||||
| `JSON` | Arbitrary JSON data structures | Object | Labels, network settings, mounts, host config |
|
||||
| `Port` | Valid TCP port number (0-65535) | Integer | Network port references |
|
||||
| `URL` | Standard URL format | String | Web UI URLs, registry URLs, support URLs |
|
||||
| `UUID` | Universally Unique Identifier | String | VM domain UUIDs |
|
||||
|
||||
### D.2 Core Interface Types
|
||||
|
||||
| Interface | Fields | Implementors |
|
||||
|-----------|--------|-------------|
|
||||
| `Node` | `id: ID!` | `Array`, `Info`, `Network`, `Notifications`, `Connect`, `ArrayDisk`, `DockerContainer`, `VmDomain`, `Share` |
|
||||
| `UserAccount` | `id`, `name`, `description`, `roles`, `permissions` | `Me`, `User` |
|
||||
|
||||
### D.3 Key Object Types
|
||||
|
||||
| Type | Key Fields | Notes |
|
||||
|------|-----------|-------|
|
||||
| `Array` | `state`, `previousState`, `pendingState`, `capacity`, `boot`, `parities[]`, `disks[]`, `caches[]`, `parityCheckStatus` | Implements Node |
|
||||
| `ArrayDisk` | `id`, `idx`, `name`, `device`, `size`, `fsSize`, `fsFree`, `fsUsed`, `status`, `rotational`, `temp`, `numReads`, `numWrites`, `numErrors`, `type`, `exportable`, `warning`, `critical`, `fsType`, `comment`, `format`, `transport`, `color`, `isSpinning` | Implements Node |
|
||||
| `ArrayCapacity` | `kilobytes { free, used, total }`, `disks { free, used, total }` | |
|
||||
| `Capacity` | `free`, `used`, `total` | All String type |
|
||||
| `ParityCheck` | Parity check status/progress data | |
|
||||
| `DockerContainer` | 25+ fields (see A.4) | Implements Node |
|
||||
| `Docker` | `id`, `containers[]`, `networks[]` | Implements Node |
|
||||
| `DockerNetwork` | `name`, `id`, `created`, `scope`, `driver`, `enableIPv6`, `ipam`, etc. | |
|
||||
| `ContainerPort` | `ip`, `privatePort`, `publicPort`, `type` | |
|
||||
| `ContainerHostConfig` | JSON host configuration | |
|
||||
| `VmDomain` | `uuid/id`, `name`, `state` | Implements Node |
|
||||
| `Vms` | `id`, `domain[]` | |
|
||||
| `Info` | `time`, `baseboard`, `cpu`, `devices`, `display`, `machineId`, `memory`, `os`, `system`, `versions`, `apps` | Implements Node |
|
||||
| `InfoCpu` | `manufacturer`, `brand`, `vendor`, `family`, `model`, `stepping`, `revision`, `voltage`, `speed`, `speedmin`, `speedmax`, `threads`, `cores`, `processors`, `socket`, `cache`, `flags` | |
|
||||
| `InfoMemory` | `max`, `total`, `free`, `used`, `active`, `available`, `buffcache`, `swaptotal`, `swapused`, `swapfree`, `layout[]` | |
|
||||
| `MemoryLayout` | `bank`, `type`, `clockSpeed`, `manufacturer` | Missing `size` field (known bug) |
|
||||
| `Os` | `platform`, `distro`, `release`, `codename`, `kernel`, `arch`, `hostname`, `codepage`, `logofile`, `serial`, `build`, `uptime` | |
|
||||
| `Baseboard` | `manufacturer`, `model`, `version`, `serial` | |
|
||||
| `SystemInfo` | `manufacturer`, `model`, `version`, `serial`, `uuid` | |
|
||||
| `Versions` | `kernel`, `docker`, `unraid`, `node` | |
|
||||
| `InfoApps` | `installed`, `started` | |
|
||||
| `Network` | `iface`, `ifaceName`, `ipv4`, `ipv6`, `mac`, `internal`, `operstate`, `type`, `duplex`, `mtu`, `speed`, `carrierChanges`, `id`, `accessUrls[]` | Implements Node |
|
||||
| `AccessUrl` | `type`, `name`, `ipv4`, `ipv6` | |
|
||||
| `Share` | `name`, `free`, `used`, `size`, `include[]`, `exclude[]`, `cache`, `nameOrig`, `comment`, `allocator`, `splitLevel`, `floor`, `cow`, `color`, `luksStatus` | |
|
||||
| `Disk` (physical) | `device`, `type`, `name`, `vendor`, `size`, `bytesPerSector`, `totalCylinders`, `totalHeads`, `totalSectors`, `totalTracks`, `tracksPerCylinder`, `sectorsPerTrack`, `firmwareRevision`, `serialNum`, `interfaceType`, `smartStatus`, `temperature`, `partitions[]` | |
|
||||
| `DiskPartition` | Partition details | |
|
||||
| `Notification` | `id`, `title`, `subject`, `description`, `importance`, `link`, `type`, `timestamp`, `formattedTimestamp` | Implements Node |
|
||||
| `NotificationOverview` | `unread { info, warning, alert, total }`, `archive { info, warning, alert, total }` | |
|
||||
| `NotificationCounts` | `info`, `warning`, `alert`, `total` | |
|
||||
| `Registration` | `id`, `type`, `state`, `expiration`, `updateExpiration`, `keyFile { location, contents }` | |
|
||||
| `Connect` | `id`, `dynamicRemoteAccess { ... }` | Implements Node |
|
||||
| `RemoteAccess` | `accessType`, `forwardType`, `port` | |
|
||||
| `Cloud` | `error`, `apiKey`, `relay`, `minigraphql`, `cloud`, `allowedOrigins` | |
|
||||
| `Flash` | Flash drive information | |
|
||||
| `UnassignedDevice` | Unassigned device details | |
|
||||
| `Service` | Service name and status | |
|
||||
| `Server` | Server details (Connect-managed) | |
|
||||
| `ApiKey` | `id`, `name`, `description`, `roles[]`, `createdAt`, `permissions[]` | |
|
||||
| `ApiKeyWithSecret` | `id`, `key`, `name`, `description`, `roles[]`, `createdAt`, `permissions[]` | |
|
||||
| `Permission` | `resource`, `actions[]` | |
|
||||
| `Config` | System configuration | |
|
||||
| `Display` | Display settings | |
|
||||
| `Owner` | Server owner info | |
|
||||
| `Me` | Current user info | Implements UserAccount |
|
||||
| `User` | User account info | Implements UserAccount |
|
||||
| `Vars` | Server variables (40+ fields) | Implements Node |
|
||||
|
||||
### D.4 Input Types
|
||||
|
||||
| Input Type | Used By | Fields |
|
||||
|-----------|---------|--------|
|
||||
| `CreateApiKeyInput` | `createApiKey` | `name!`, `description`, `roles[]`, `permissions[]`, `overwrite` |
|
||||
| `AddPermissionInput` | `addPermission` | `resource!`, `actions![]` |
|
||||
| `AddRoleForUserInput` | `addRoleForUser` | User + role assignment |
|
||||
| `AddRoleForApiKeyInput` | `addRoleForApiKey` | API key + role assignment |
|
||||
| `RemoveRoleFromApiKeyInput` | `removeRoleFromApiKey` | API key + role removal |
|
||||
| `arrayDiskInput` | `addDiskToArray`, `removeDiskFromArray` | Disk assignment data |
|
||||
| `ConnectSignInInput` | `connectSignIn` | Connect credentials |
|
||||
| `EnableDynamicRemoteAccessInput` | `enableDynamicRemoteAccess` | Remote access config |
|
||||
| `AllowedOriginInput` | `setAdditionalAllowedOrigins` | Origin URLs |
|
||||
| `SetupRemoteAccessInput` | `setupRemoteAccess` | Remote access setup |
|
||||
| `NotificationData` | `createNotification`, `notifyIfUnique` | title, subject, description, importance |
|
||||
| `NotificationFilter` | `notifications.list` | Filter criteria |
|
||||
| `addUserInput` | `addUser` | User creation data |
|
||||
| `deleteUserInput` | `deleteUser` | User deletion target |
|
||||
| `usersInput` | `users` | User listing filter |
|
||||
|
||||
---
|
||||
|
||||
## E. All Enums
|
||||
|
||||
### E.1 Array & Disk Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **ArrayState** | `STARTED`, `STOPPED`, `NEW_ARRAY`, `RECON_DISK`, `DISABLE_DISK`, `SWAP_DSBL`, `INVALID_EXPANSION`, `PARITY_NOT_BIGGEST`, `TOO_MANY_MISSING_DISKS`, `NEW_DISK_TOO_SMALL`, `NO_DATA_DISKS` |
|
||||
| **ArrayPendingState** | Pending state transitions (exact values not documented) |
|
||||
| **ArrayDiskStatus** | `DISK_NP`, `DISK_OK`, `DISK_NP_MISSING`, `DISK_INVALID`, `DISK_WRONG`, `DISK_DSBL`, `DISK_NP_DSBL`, `DISK_DSBL_NEW`, `DISK_NEW` |
|
||||
| **ArrayDiskType** | `Data`, `Parity`, `Flash`, `Cache` |
|
||||
| **ArrayDiskFsColor** | `GREEN_ON`, `GREEN_BLINK`, `BLUE_ON`, `BLUE_BLINK`, `YELLOW_ON`, `YELLOW_BLINK`, `RED_ON`, `RED_OFF`, `GREY_OFF` |
|
||||
| **DiskInterfaceType** | `SAS`, `SATA`, `USB`, `PCIe`, `UNKNOWN` |
|
||||
| **DiskFsType** | `xfs`, `btrfs`, `vfat`, `zfs` |
|
||||
| **DiskSmartStatus** | SMART health assessment values |
|
||||
|
||||
### E.2 Docker Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **ContainerState** | `RUNNING`, `PAUSED`, `EXITED` |
|
||||
| **ContainerPortType** | `TCP`, `UDP` |
|
||||
| **UpdateStatus** | `UP_TO_DATE`, `UPDATE_AVAILABLE`, `REBUILD_READY`, `UNKNOWN` |
|
||||
|
||||
### E.3 VM Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **VmState** | `NOSTATE`, `RUNNING`, `IDLE`, `PAUSED`, `SHUTDOWN`, `SHUTOFF`, `CRASHED`, `PMSUSPENDED` |
|
||||
|
||||
### E.4 Notification Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **Importance** | `ALERT`, `INFO`, `WARNING` |
|
||||
| **NotificationType** | `UNREAD`, `ARCHIVE` |
|
||||
|
||||
### E.5 Auth & Permission Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **Role** | `ADMIN`, `CONNECT`, `GUEST`, `VIEWER` |
|
||||
| **AuthAction** | `CREATE_ANY`, `CREATE_OWN`, `READ_ANY`, `READ_OWN`, `UPDATE_ANY`, `UPDATE_OWN`, `DELETE_ANY`, `DELETE_OWN` |
|
||||
| **Resource** (30 shown; the schema may define additional values) | `ACTIVATION_CODE`, `API_KEY`, `ARRAY`, `CLOUD`, `CONFIG`, `CONNECT`, `CONNECT__REMOTE_ACCESS`, `CUSTOMIZATIONS`, `DASHBOARD`, `DISK`, `DISPLAY`, `DOCKER`, `FLASH`, `INFO`, `LOGS`, `ME`, `NETWORK`, `NOTIFICATIONS`, `ONLINE`, `OS`, `OWNER`, `PERMISSION`, `REGISTRATION`, `SERVERS`, `SERVICES`, `SHARE`, `USER`, `VARS`, `VMS`, `WELCOME` |
|
||||
|
||||
### E.6 Registration Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **RegistrationState** | `TRIAL`, `BASIC`, `PLUS`, `PRO`, `STARTER`, `UNLEASHED`, `LIFETIME`, `EEXPIRED`, `EGUID`, `EGUID1`, `ETRIAL`, `ENOKEYFILE`, `ENOFLASH`, `EBLACKLISTED`, `ENOCONN` |
|
||||
|
||||
### E.7 Configuration Enums
|
||||
|
||||
| Enum | Values |
|
||||
|------|--------|
|
||||
| **ConfigErrorState** | Configuration error state values |
|
||||
| **WAN_ACCESS_TYPE** | `DYNAMIC`, `ALWAYS`, `DISABLED` |
|
||||
| **WAN_FORWARD_TYPE** | WAN forwarding type values |
|
||||
|
||||
---
|
||||
|
||||
## F. API Capabilities NOT Currently in the MCP Server
|
||||
|
||||
The current MCP server has 10 tools (76 actions) after consolidation. The following capabilities are available in the Unraid API but NOT covered by any existing tool.
|
||||
|
||||
### F.1 HIGH PRIORITY - New Tool Candidates
|
||||
|
||||
#### Array Management (0 tools currently, 6 mutations + 1 query available)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `start_array()` | `startArray` mutation | Core server management |
|
||||
| `stop_array()` | `stopArray` mutation | Core server management |
|
||||
| `start_parity_check(correct)` | `startParityCheck` mutation | Data integrity management |
|
||||
| `pause_parity_check()` | `pauseParityCheck` mutation | Parity management |
|
||||
| `resume_parity_check()` | `resumeParityCheck` mutation | Parity management |
|
||||
| `cancel_parity_check()` | `cancelParityCheck` mutation | Parity management |
|
||||
| `get_parity_history()` | `parityHistory` query | Historical parity check results |
|
||||
|
||||
#### Server Power Management (0 tools currently, 2 mutations available)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `shutdown_server()` | `shutdown` mutation | Remote server management |
|
||||
| `reboot_server()` | `reboot` mutation | Remote server management |
|
||||
|
||||
#### Notification Management (read-only currently, 10+ mutations available)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `create_notification(input)` | `createNotification` mutation | Proactive alerting from MCP |
|
||||
| `archive_notification(id)` | `archiveNotification` mutation | Notification lifecycle |
|
||||
| `archive_all_notifications(importance?)` | `archiveAll` mutation | Bulk management |
|
||||
| `delete_notification(id, type)` | `deleteNotification` mutation | Cleanup |
|
||||
| `delete_archived_notifications()` | `deleteArchivedNotifications` mutation | Bulk cleanup |
|
||||
| `unread_notification(id)` | `unreadNotification` mutation | Mark as unread |
|
||||
| `get_warnings_and_alerts()` | `notifications.warningsAndAlerts` query | Focused severity view |
|
||||
|
||||
#### Docker Extended Operations (3 tools currently, 10+ mutations available)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `pause_docker_container(id)` | `docker.pause` mutation | Container lifecycle |
|
||||
| `unpause_docker_container(id)` | `docker.unpause` mutation | Container lifecycle |
|
||||
| `remove_docker_container(id, with_image?)` | `docker.removeContainer` mutation | Container cleanup |
|
||||
| `update_docker_container(id)` | `docker.updateContainer` mutation | Keep containers current |
|
||||
| `update_all_docker_containers()` | `docker.updateAllContainers` mutation | Bulk updates |
|
||||
| `check_docker_updates()` | `containerUpdateStatuses` query | Pre-update assessment |
|
||||
| `get_docker_container_logs(id, since?, tail?)` | `docker.logs` query | Debugging/monitoring |
|
||||
| `list_docker_networks(all?)` | `dockerNetworks` query | Network inspection |
|
||||
| `get_docker_network(id)` | `dockerNetwork` query | Network details |
|
||||
| `check_docker_port_conflicts()` | `docker.portConflicts` query | Conflict detection |
|
||||
|
||||
#### Disk Operations (2 tools currently, 5 mutations available)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `mount_array_disk(id)` | `mountArrayDisk` mutation | Disk management |
|
||||
| `unmount_array_disk(id)` | `unmountArrayDisk` mutation | Disk management |
|
||||
| `clear_disk_statistics(id)` | `clearArrayDiskStatistics` mutation | Statistics reset |
|
||||
| `add_disk_to_array(input)` | `addDiskToArray` mutation | Array expansion |
|
||||
| `remove_disk_from_array(input)` | `removeDiskFromArray` mutation | Array modification |
|
||||
|
||||
### F.2 MEDIUM PRIORITY - New Tool Candidates
|
||||
|
||||
#### UPS Monitoring (0 tools currently, 3 queries + 1 mutation + 1 subscription)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `list_ups_devices()` | `upsDevices` query | UPS monitoring |
|
||||
| `get_ups_device(id)` | `upsDeviceById` query | UPS details |
|
||||
| `get_ups_configuration()` | `upsConfiguration` query | UPS config |
|
||||
| `configure_ups(config)` | `configureUps` mutation | UPS management |
|
||||
|
||||
#### System Metrics (0 tools currently, 1 query + 3 subscriptions)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_system_metrics()` | `metrics` query | Performance monitoring |
|
||||
| `get_cpu_utilization()` | `systemMetricsCpu` subscription (polled) | Real-time CPU |
|
||||
| `get_memory_utilization()` | `systemMetricsMemory` subscription (polled) | Real-time memory |
|
||||
| `get_cpu_telemetry()` | `systemMetricsCpuTelemetry` subscription (polled) | CPU temp/power |
|
||||
|
||||
#### Unassigned Devices (0 tools currently, 1 query + 1 subscription)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `list_unassigned_devices()` | `unassignedDevices` query | Device management |
|
||||
|
||||
#### Flash Drive (0 tools currently, 1 query + 1 subscription)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_flash_info()` | `flash` query | Flash drive status |
|
||||
|
||||
#### User Management (0 tools currently, 3 queries + 2 mutations)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_current_user()` | `me` query | Identity context |
|
||||
| `list_users()` | `users` query | User management |
|
||||
| `get_user(id)` | `user(id)` query | User details |
|
||||
| `add_user(input)` | `addUser` mutation | User creation |
|
||||
| `delete_user(input)` | `deleteUser` mutation | User removal |
|
||||
|
||||
#### Services (0 tools currently, 1 query + 1 subscription)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `list_services()` | `services` query | Service monitoring |
|
||||
|
||||
#### Settings (0 tools currently, 1 query)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_settings()` | `settings` query | Configuration inspection |
|
||||
|
||||
### F.3 LOW PRIORITY - New Tool Candidates
|
||||
|
||||
#### API Key Management (0 tools currently, 2 queries + 5 mutations)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `list_api_keys()` | `apiKeys` query | Key inventory |
|
||||
| `get_api_key(id)` | `apiKey(id)` query | Key details |
|
||||
| `create_api_key(input)` | `createApiKey` mutation | Key provisioning |
|
||||
| `delete_api_keys(input)` | `deleteApiKeys` mutation | Key cleanup |
|
||||
| `update_api_key(input)` | `updateApiKey` mutation | Key modification |
|
||||
|
||||
#### Remote Access Management (0 tools currently, 1 query + 3 mutations)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_remote_access()` | `remoteAccess` query | Remote access status |
|
||||
| `setup_remote_access(input)` | `setupRemoteAccess` mutation | Remote access config |
|
||||
| `enable_dynamic_remote_access(input)` | `enableDynamicRemoteAccess` mutation | Toggle remote access |
|
||||
| `set_allowed_origins(input)` | `setAdditionalAllowedOrigins` mutation | CORS config |
|
||||
|
||||
#### Cloud/Connect Management (0 tools currently, 1 query + 2 mutations)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_cloud_status()` | `cloud` query | Cloud connectivity |
|
||||
| `connect_sign_in(input)` | `connectSignIn` mutation | Connect auth |
|
||||
| `connect_sign_out()` | `connectSignOut` mutation | Connect deauth |
|
||||
|
||||
#### Server Management (0 tools currently, 4 queries)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_server_info()` | `server` query | Server details |
|
||||
| `list_servers()` | `servers` query | Multi-server view |
|
||||
| `get_online_status()` | `online` query | Connectivity check |
|
||||
| `get_owner_info()` | `owner` query | Server owner |
|
||||
|
||||
#### Display & Config (0 tools currently, 2 queries)
|
||||
|
||||
| Proposed Tool | API Operation | Why Important |
|
||||
|--------------|---------------|---------------|
|
||||
| `get_display_settings()` | `display` query | Display config |
|
||||
| `get_config()` | `config` query | System config |
|
||||
|
||||
### F.4 Summary: Coverage Statistics
|
||||
|
||||
| Category | Available in API | Covered by MCP (actions) | Gap |
|
||||
|----------|-----------------|--------------------------|-----|
|
||||
| **Queries** | ~30+ | 14 | ~16+ uncovered |
|
||||
| **Mutations** | ~50+ | 10 (start/stop Docker+VM, RClone CRUD) | ~40+ uncovered |
|
||||
| **Subscriptions** | ~30+ | 0 (2 diagnostic only) | ~30+ uncovered |
|
||||
| **Total** | ~110+ | ~24 unique API operations (76 actions across 10 tools) | ~86+ uncovered |
|
||||
|
||||
**Current coverage: approximately 22% of available API operations** (24 of ~110 unique GraphQL queries/mutations/subscriptions). Note: the MCP server exposes 76 actions, but many actions map to the same underlying API operation with different parameters.
|
||||
|
||||
---
|
||||
|
||||
## G. Community Project Capabilities
|
||||
|
||||
### G.1 unraid-management-agent (Go Plugin by Ruaan Deysel)
|
||||
|
||||
Capabilities this project offers that we do NOT:
|
||||
|
||||
| Capability | Details | Our Status |
|
||||
|-----------|---------|------------|
|
||||
| **SMART Disk Data** | Detailed SMART attributes, health monitoring | NOT available via GraphQL API (Issue #1839) |
|
||||
| **Container Logs** | Docker container log retrieval | Available via `docker.logs` query (we don't use it) |
|
||||
| **GPU Metrics** | GPU utilization, temperature, VRAM | NOT available via GraphQL API |
|
||||
| **Process Monitoring** | Active process list, resource usage | NOT available via GraphQL API |
|
||||
| **CPU Load Averages** | Real-time 1/5/15 min load averages | Available via `metrics` query (we don't use it) |
|
||||
| **Prometheus Metrics** | 41 exportable metrics at `/metrics` | NOT applicable to MCP |
|
||||
| **MQTT Publishing** | IoT event streaming | NOT applicable to MCP |
|
||||
| **Home Assistant Auto-Discovery** | MQTT auto-discovery | NOT applicable to MCP |
|
||||
| **Disk Temperature History** | Historical temp tracking | Limited via API |
|
||||
| **UPS Data** | UPS status monitoring | Available via API (we don't use it) |
|
||||
| **Plugin Information** | List installed plugins | NOT available via GraphQL API |
|
||||
| **Update Status** | Check for OS/plugin updates | NOT available via GraphQL API |
|
||||
| **Mover Control** | Invoke the mover tool | NOT available via GraphQL API (Issue #1873) |
|
||||
| **Disk Thresholds** | Warning/critical temp settings | Partially available via `ArrayDisk.warning`/`critical` |
|
||||
| **54 MCP Tools** | Full MCP tool suite | We have 10 tools (76 actions) |
|
||||
| **WebSocket Events** | Real-time event stream | We have diagnostic-only subscriptions |
|
||||
|
||||
### G.2 PSUnraid (PowerShell Module)
|
||||
|
||||
| Capability | Details | Our Status |
|
||||
|-----------|---------|------------|
|
||||
| **Server Status** | Comprehensive server overview | We have `get_system_info()` |
|
||||
| **Array Status** | Array state and disk health | We have `get_array_status()` |
|
||||
| **Docker Start/Stop/Restart** | Container lifecycle | We have start/stop only (no restart, no pause) |
|
||||
| **VM Start/Stop** | VM lifecycle | We have full VM lifecycle |
|
||||
| **Notification Retrieval** | Read notifications | We have `list_notifications()` |
|
||||
| **Restart Containers** | Dedicated restart action | We do NOT have restart (would be stop+start) |
|
||||
|
||||
### G.3 unraid-ssh-mcp
|
||||
|
||||
Chose SSH over GraphQL API due to these gaps:
|
||||
|
||||
| Missing from GraphQL API | Impact on Our Project |
|
||||
|--------------------------|----------------------|
|
||||
| Container logs | Now available in API (`docker.logs`) -- we should add it |
|
||||
| Detailed SMART data | Still missing from API (Issue #1839) |
|
||||
| Real-time CPU load | Now available via `metrics` query -- we should add it |
|
||||
| Process monitoring | Still missing from API |
|
||||
| `/proc` and `/sys` access | Not applicable via API |
|
||||
|
||||
### G.4 Home Assistant Integrations
|
||||
|
||||
#### domalab/ha-unraid
|
||||
|
||||
| Capability | Our Status |
|
||||
|-----------|------------|
|
||||
| CPU usage, temperature, power consumption | NO - missing metrics tools |
|
||||
| Memory utilization tracking | NO - missing metrics tools |
|
||||
| Per-disk and per-share metrics | PARTIAL - have basic disk/share info |
|
||||
| Docker container start/stop switches | YES |
|
||||
| VM management controls | YES |
|
||||
| UPS monitoring with energy dashboard | NO |
|
||||
| Notification counts | YES |
|
||||
| Dynamic entity creation | N/A |
|
||||
|
||||
#### chris-mc1/unraid_api
|
||||
|
||||
| Capability | Our Status |
|
||||
|-----------|------------|
|
||||
| Array status, storage utilization | YES |
|
||||
| RAM and CPU usage | NO - missing metrics |
|
||||
| Per-share free space | YES |
|
||||
| Per-disk: temperature, spin state, capacity | PARTIAL |
|
||||
|
||||
---
|
||||
|
||||
## H. Known API Bugs and Limitations
|
||||
|
||||
### H.1 Active Bugs (from GitHub Issues)
|
||||
|
||||
| Issue | Title | Impact on MCP Implementation |
|
||||
|-------|-------|------------------------------|
|
||||
| **#1837** | GraphQL partial failures | **CRITICAL**: Entire queries fail when VMs/Docker unavailable. Must implement partial failure handling with separate try/catch per section. |
|
||||
| **#1842** | Temperature inconsistency | SSD temps unavailable in `disks` query but accessible via `array` query. Use Array endpoint for temperature data. |
|
||||
| **#1840** | Docker cache invalidation | Docker container data may be stale after external changes (docker CLI). Use `skipCache: true` parameter when available. |
|
||||
| **#1825** | UPS false data | API returns hardcoded/phantom values when NO UPS is connected. Must validate UPS data before presenting to user. |
|
||||
| **#1861** | VM PMSUSPENDED issues | Cannot unsuspend VMs in `PMSUSPENDED` state. Must handle this state explicitly and warn users. |
|
||||
| **#1859** | Notification counting errors | Archive counts may include duplicates. Use `recalculateOverview` mutation to fix. |
|
||||
| **#1818** | Network query failures | GraphQL may return empty lists for network data. Handle gracefully. |
|
||||
| **#1871** | Container restart/update mutation | Single restart+update operation not yet in API. Must implement as separate stop+start. |
|
||||
| **#1873** | Mover not invocable via API | No GraphQL mutation to trigger the mover. Cannot implement mover tools. |
|
||||
| **#1839** | SMART disk data missing | Detailed SMART attributes not yet exposed via GraphQL. Major gap for disk health tools. |
|
||||
| **#1872** | CLI list missing creation dates | Timestamp data unavailable in some CLI operations. |
|
||||
|
||||
### H.2 Schema/Type Issues
|
||||
|
||||
| Issue | Description | Workaround |
|
||||
|-------|-------------|------------|
|
||||
| **Int Overflow** | Memory size fields and disk operation counters can overflow 32-bit Int. API uses `Long`/`BigInt` scalars but some fields remain problematic. | Parse values as strings, convert to Python `int` |
|
||||
| **NaN Values** | Fields `sysArraySlots`, `sysCacheSlots`, `cacheNumDevices`, `cacheSbNumDisks` in `vars` query can return NaN. | Query only curated subset of `vars` fields (current approach) |
|
||||
| **Non-nullable Null** | `info.devices` section has non-nullable fields that return null in practice. | Avoid querying `info.devices` entirely (current approach) |
|
||||
| **Memory Layout Size** | Individual memory stick `size` values not returned by API. | Cannot calculate total memory from layout data |
|
||||
| **PrefixedID Format** | IDs follow `TypePrefix:uuid` format. Clients must handle as opaque strings. | Already handled in current implementation |
|
||||
|
||||
### H.3 Infrastructure Limitations
|
||||
|
||||
| Limitation | Description | Impact |
|
||||
|-----------|-------------|--------|
|
||||
| **Rate Limiting** | 100 requests per 10 seconds (`@nestjs/throttler`). | Must implement request queuing/backoff for bulk operations |
|
||||
| **EventEmitter Limit** | Max 30 concurrent subscription listeners. | Limit simultaneous subscription tools |
|
||||
| **Disk Operation Timeouts** | Disk queries require 90s+ read timeouts. | Already handled with custom timeout config |
|
||||
| **Docker Size Queries** | `sizeRootFs` query is expensive. | Make it optional in list queries, only include in detail queries |
|
||||
| **Storage Polling Interval** | SMART query overhead means storage data should poll at 5min minimum. | Rate-limit storage-related subscriptions |
|
||||
| **Cache TTL** | cache-manager v7 expects TTL in milliseconds (not seconds). | Correct TTL units in any caching implementation |
|
||||
| **Schema Volatility** | API schema is still evolving between versions. | Consider version-checking at startup, graceful degradation |
|
||||
| **Nchan Memory** | WebSocket subscriptions can cause Nginx memory exhaustion (mitigated in 7.1.0+ but still possible). | Limit concurrent subscriptions, implement reconnection logic |
|
||||
| **SSL/TLS** | Self-signed certificates require special handling for local IP access. | Already handled via `UNRAID_VERIFY_SSL` env var |
|
||||
| **Version Dependency** | Full API requires Unraid 7.2+. Pre-7.2 needs Connect plugin. | Document minimum version requirements per tool |
|
||||
|
||||
### H.4 Features Requested but NOT Yet in API
|
||||
|
||||
| Feature | GitHub Issue | Status |
|
||||
|---------|-------------|--------|
|
||||
| Mover invocation | #1873 | Open feature request |
|
||||
| SMART disk data | #1839 | Open feature request (was bounty candidate) |
|
||||
| System temperature monitoring (CPU, GPU, motherboard, NVMe, chipset) | #1597 | Open bounty (not implemented) |
|
||||
| Container restart+update single mutation | #1871 | Open feature request |
|
||||
| Docker Compose native support | Roadmap TBD | Under consideration |
|
||||
| Plugin information/management via API | Not filed | Not exposed |
|
||||
| File browser/upload/download | Not filed | Legacy PHP WebGUI only |
|
||||
| Process list monitoring | Not filed | Not exposed |
|
||||
| GPU metrics | Not filed | Not exposed |
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Proposed New Tool Count by Priority
|
||||
|
||||
| Priority | Category | New Tools | Total After |
|
||||
|----------|----------|-----------|-------------|
|
||||
| **HIGH** | Array Management | 7 | |
|
||||
| **HIGH** | Server Power | 2 | |
|
||||
| **HIGH** | Notification Mutations | 7 | |
|
||||
| **HIGH** | Docker Extended | 10 | |
|
||||
| **HIGH** | Disk Operations | 5 | |
|
||||
| | **High Priority Subtotal** | **31** | **57** |
|
||||
| **MEDIUM** | UPS Monitoring | 4 | |
|
||||
| **MEDIUM** | System Metrics | 4 | |
|
||||
| **MEDIUM** | Unassigned Devices | 1 | |
|
||||
| **MEDIUM** | Flash Drive | 1 | |
|
||||
| **MEDIUM** | User Management | 5 | |
|
||||
| **MEDIUM** | Services | 1 | |
|
||||
| **MEDIUM** | Settings | 1 | |
|
||||
| | **Medium Priority Subtotal** | **17** | **74** |
|
||||
| **LOW** | API Key Management | 5 | |
|
||||
| **LOW** | Remote Access | 4 | |
|
||||
| **LOW** | Cloud/Connect | 3 | |
|
||||
| **LOW** | Server Management | 4 | |
|
||||
| **LOW** | Display & Config | 2 | |
|
||||
| | **Low Priority Subtotal** | **18** | **92** |
|
||||
| | **GRAND TOTAL NEW TOOLS** | **66** | **92** |
|
||||
|
||||
**Current tools: 10 (76 actions) | Potential total: ~110+ operations | Remaining gap: ~20+ uncovered operations**
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Data Sources Cross-Reference
|
||||
|
||||
| Document | Lines | Key Contributions |
|
||||
|----------|-------|-------------------|
|
||||
| `unraid-api-research.md` | 819 | API overview, auth flow, query/mutation examples, version history, recommendations |
|
||||
| `unraid-api-source-analysis.md` | 998 | Complete resolver listing, PubSub channels, mutation details, open issues, community projects |
|
||||
| `unraid-api-exa-research.md` | 569 | DeepWiki architecture, rate limits, OIDC providers, Python client library, MCP listings |
|
||||
| `unraid-api-crawl.md` | 1451 | Complete GraphQL schema (Query/Mutation/Subscription types), CLI reference, all enums/scalars |
|
||||
| `raw/release-7.0.0.md` | 958 | ZFS support, VM snapshots/clones, File Manager, Tailscale, notification agents |
|
||||
| `raw/release-7.2.0.md` | 348 | API built into OS, responsive WebGUI, RAIDZ expansion, SSO, Ext2/3/4/NTFS/exFAT support |
|
||||
| `raw/blog-api-bounty.md` | 139 | Feature Bounty Program, community projects showcase |
|
||||
@@ -1,176 +0,0 @@
|
||||
* [Unraid News](https://unraid.net/blog)
|
||||
|
||||
|
||||
29 October 2025
|
||||
|
||||
Unraid OS 7.2.0 Stable is Now Available
|
||||
=======================================
|
||||
|
||||
Unraid 7.2.0 delivers a **fully responsive web interface, expanded filesystem support, a built-in, open-source API**, **ZFS RAIDZ Expansion,** and much more!
|
||||
|
||||

|
||||
|
||||
**Your Server: More Responsive, Secure, and More Flexible than ever.**
|
||||
|
||||
Building on months of testing and feedback, this release brings major quality-of-life improvements for new and seasoned users alike. Whether you're upgrading your homelab or deploying at scale, this release brings more control, compatibility, and confidence to every system.
|
||||
|
||||
We want to give a huge thanks to the _over 5,000 beta testers_ that helped bring this release to Stable.
|
||||
|
||||
**Fully Responsive WebGUI**
|
||||
---------------------------
|
||||
|
||||
Unraid now adapts seamlessly to any screen size. The redesigned WebGUI ensures smooth operation across desktops, tablets, and mobile devices, making it easier than ever to manage your server from anywhere, with any device.
|
||||
|
||||
### See the Responsive WebGUI in action
|
||||
|
||||
**Expand Your RAIDZ Pools and Bring Every Drive With You**
|
||||
----------------------------------------------------------
|
||||
|
||||
### **ZFS RAIDZ Expansion**
|
||||
|
||||
You can now expand your single-vdev RAIDZ1/2/3 pools, one drive at a time!
|
||||
|
||||
1. Stop the array
|
||||
2. On _**Main → Pool Devices,**_ add a slot to the pool
|
||||
3. Select the appropriate drive. _Note: must be at least as large as the smallest drive in the pool._
|
||||
4. Start the array
|
||||
|
||||
### See How RAIDZ Expansion Works
|
||||
|
||||
### **External Drive Support: ext2/3/4, NTFS, exFAT**
|
||||
|
||||
Alongside XFS, BTRFS, and the ZFS file systems, Unraid now supports ext2 / ext3 / ext4, NTFS, and exFAT out of the box, making it easier to import data from external sources or legacy systems.
|
||||
|
||||
This means you can _create an array or single device pool with existing drives formatted in Ext2/3/4 or NTFS, and you can format drives in Ext4 or NTFS._
|
||||
|
||||
### Learn How Unraid Handles ext, NTFS, and exFAT Out of the Box
|
||||
|
||||
Cyber Weekend is Coming
|
||||
-----------------------
|
||||
|
||||
Don’t miss our biggest sale of the year November 28-December 1st. Subscribe to the [Unraid Digest](https://newsletter.unraid.net/)
|
||||
and be the first to know all of the details!
|
||||
|
||||
[Subscribe](https://newsletter.unraid.net/)
|
||||
|
||||
**Unraid API**
|
||||
--------------
|
||||
|
||||
The [**Unraid API**](https://docs.unraid.net/API/)
|
||||
is now integrated directly into Unraid OS, giving developers and power users new ways to interact with their systems.
|
||||
|
||||
The new **Notifications panel** is the first major feature built on this foundation, and over time, more of the webGUI will transition to use the API for faster, more dynamic updates.
|
||||
|
||||
The API is fully [**open source**](https://github.com/unraid/api)
|
||||
, providing direct access to system data and functionality for building automations, dashboards, and third‑party integrations. It also supports [**external authentication (OIDC)**](https://docs.unraid.net/API/oidc-provider-setup/)
|
||||
for secure, scalable access.
|
||||
|
||||
### See the Unraid API in Action!
|
||||
|
||||
Learn More about the Unraid API
|
||||
-------------------------------
|
||||
|
||||
* #### [Follow along the Unraid API Roadmap](https://docs.unraid.net/API/upcoming-features/)
|
||||
|
||||
* #### [See current apps using the Unraid API](https://discord.com/channels/216281096667529216/1375651142704566282)
|
||||
|
||||
|
||||
**Additional Improvements and Fixes**
|
||||
-------------------------------------
|
||||
|
||||
### **Storage & Array**
|
||||
|
||||
* Two-device ZFS pools default to mirrors; use RAIDZ1 for future vdev expansion
|
||||
* New _File System Status_ shows if drives are mounted and/or empty
|
||||
* Exclusive shares now exportable via NFS
|
||||
* Restricted special share names (homes, global, printers)
|
||||
* Improved SMB config (smb3 directory leases = no) and security settings UX
|
||||
* Better handling for parity disks with 1MiB partitions
|
||||
* BTRFS mounts more reliably with multiple FS signatures
|
||||
* New drives now repartitioned when added to parity-protected arrays
|
||||
* Devices in SMART test won’t spin down
|
||||
* Cleaner handling of case-insensitive share names and invalid characters
|
||||
* ZFS vdevs now display correctly in allocation profiles
|
||||
|
||||
### **VM Manager**
|
||||
|
||||
* Console access now works even when user shares are disabled
|
||||
* Single quotes are no longer allowed in the Domains storage path
|
||||
* Windows 11 defaults have been updated for better compatibility
|
||||
* Cdrom Bus now defaults to IDE for i440fx and SATA for Q35 machines
|
||||
* Vdisk locations now display properly in non-English languages
|
||||
* You'll now see a warning when adding a second vdisk if you forget to assign a capacity
|
||||
|
||||
### **WebGUI**
|
||||
|
||||
* Network and RAM stats now shown in human-readable units
|
||||
* Font size and layout fixes
|
||||
* Better error protection for PHP-based failures
|
||||
|
||||
### Miscellaneous **Improvements**
|
||||
|
||||
* Better logging during plugin installs
|
||||
* Added safeguards to protect WebGUI from fatal PHP errors
|
||||
* Diagnostics ZIPs are now further anonymized
|
||||
* Resolved crash related to Docker container CPU pinning
|
||||
* Fixed Docker NAT issue caused by missing br\_netfilter
|
||||
* Scheduled mover runs are now properly logged
|
||||
|
||||
### **Kernel & Packages**
|
||||
|
||||
* Linux Kernel 6.12.54-Unraid
|
||||
* Samba 4.23.2
|
||||
* Updated versions of openssl, mesa, kernel-firmware, git, exfatprogs, and more
|
||||
|
||||
**Plugin Compatibility Notice**
|
||||
-------------------------------
|
||||
|
||||
To maintain stability with the new responsive WebGUI, the following plugins will be removed during upgrade if present:
|
||||
|
||||
* **Theme Engine**
|
||||
* **Dark Theme**
|
||||
* **Dynamix Date Time**
|
||||
* **Flash Remount**
|
||||
* **Outdated versions of Unraid Connect**
|
||||
|
||||
Please update all other plugins—**especially Unraid Connect and Nvidia Driver**—before upgrading!
|
||||
|
||||
Unraid 7.2.0
|
||||
------------
|
||||
|
||||
Important Release Links
|
||||
|
||||
* #### [Docs](https://docs.unraid.net/unraid-os/release-notes/7.2.0/)
|
||||
|
||||
Version 7.2.0 Full Release Notes
|
||||
|
||||
* #### [Forum Thread](https://forums.unraid.net/topic/194610-unraid-os-version-720-available/)
|
||||
|
||||
Unraid 7.2.0 Forum Thread
|
||||
|
||||
* #### [Known Issues](https://docs.unraid.net/unraid-os/release-notes/7.2.0/#known-issues)
|
||||
|
||||
See the Known Issues for the Unraid 7.2 series
|
||||
|
||||
* #### [Learn More](https://docs.unraid.net/unraid-os/system-administration/maintain-and-update/upgrading-unraid/#standard-upgrade-process)
|
||||
|
||||
Ready to Upgrade? Visit your server’s Tools → Update OS page to install Unraid 7.2.0.
|
||||
|
||||
|
||||

|
||||
|
||||
Pricing
|
||||
-------
|
||||
|
||||
With affordable options starting at just $49, we have a license for everyone.
|
||||
|
||||
[Buy Now](https://account.unraid.net/buy)
|
||||
|
||||

|
||||
|
||||
Try before you buy
|
||||
------------------
|
||||
|
||||
Not sure if Unraid is right for you? Take Unraid for a test drive for 30 days—no credit card required.
|
||||
|
||||
[Free Trial](https://unraid.net/getting-started)
|
||||
@@ -1,139 +0,0 @@
|
||||
* [Unraid News](https://unraid.net/blog)
|
||||
|
||||
|
||||
5 September 2025
|
||||
|
||||
Introducing the Unraid API Feature Bounty Program
|
||||
=================================================
|
||||
|
||||
We’re opening new doors for developers and power users to directly shape the Unraid experience, together.
|
||||
|
||||
The new [Unraid API](https://docs.unraid.net/API/)
|
||||
has already come a long way as a powerful, open-source toolkit that unlocks endless possibilities for automation, integrations, and third-party applications. With each release, we’ve seen the creativity of our community take center stage, building tools that extend the Unraid experience in ways we never imagined.
|
||||
|
||||
Now, we’re taking it one step further with the [**Unraid API Feature Bounty Program**.](https://unraid.net/feature-bounty)
|
||||
|
||||
### **What Is the Feature Bounty Program?**
|
||||
|
||||
The bounty program gives developers (and adventurous users) a way to directly contribute to the Unraid API roadmap. Here’s how it works:
|
||||
|
||||
1. **Feature Requests Become Bounties:** We post specific API features that would benefit the entire Unraid ecosystem.
|
||||
2. **You Build & Contribute:** Developers who implement these features can claim the bounty, earn recognition, and a monetary reward.
|
||||
3. **Community Driven Growth:** Instead of waiting for features to arrive, you can help build them, get rewarded, and help the Unraid community.
|
||||
|
||||
Our core team focuses on high-priority roadmap items. Bounties give the community a way to help accelerate other highly requested features by bringing more ideas to life, faster, with recognition and reward for those who contribute.
|
||||
|
||||
API Feature Bounty Program Details
|
||||
----------------------------------
|
||||
|
||||
You can turn feature requests into reality, get rewarded for your contributions, and help grow the open-source Unraid API ecosystem.
|
||||
|
||||
[Learn More](https://unraid.net/feature-bounty)
|
||||
|
||||
### **The Open-Source Unraid API**
|
||||
|
||||
Alongside the bounty program, we’re thrilled to highlight just how open and flexible the Unraid API has become. Whether you’re scripting via the CLI, building automations with the API, or integrating with external identity providers through OAuth2/OIDC, the API is designed to be transparent and extensible.
|
||||
|
||||
API Docs
|
||||
--------
|
||||
|
||||
Learn about how to get started with the Unraid API.
|
||||
|
||||
[Start Here](https://docs.unraid.net/API/)
|
||||
|
||||
OIDC Provider Setup
|
||||
-------------------
|
||||
|
||||
Configure OIDC providers for SSO authentication in the Unraid API using the web interface.
|
||||
|
||||
[OIDC](https://docs.unraid.net/API/oidc-provider-setup/)
|
||||
|
||||
Upcoming API Features
|
||||
---------------------
|
||||
|
||||
The roadmap outlines completed and planned features for the Unraid API. Features and timelines may change based on development priorities and community feedback.
|
||||
|
||||
[Learn More](https://docs.unraid.net/API/upcoming-features/)
|
||||
|
||||
Community API Projects in Action
|
||||
--------------------------------
|
||||
|
||||
The power of an open API is best shown by what you build with it. Here are just a few highlights from the community so far!
|
||||
|
||||

|
||||
|
||||
### [Unraid Mobile App](https://forums.unraid.net/topic/189522-unraid-mobile-app/)
|
||||
|
||||
by S3ppo
|
||||
|
||||

|
||||
|
||||
### [Homepage Dashboard Widget](https://discord.com/channels/216281096667529216/1379497640110063656)
|
||||
|
||||
by surf108
|
||||
|
||||

|
||||
|
||||
### [Home Assistant Integration](https://github.com/domalab/ha-unraid-connect)
|
||||
|
||||
by domalab
|
||||
|
||||

|
||||
|
||||
[Unloggarr (AI-powered log analysis)](https://github.com/jmagar/unloggarr)
|
||||
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
by jmagar
|
||||
|
||||

|
||||
|
||||
[nzb360 Mobile App (Android)](https://play.google.com/store/apps/details?id=com.kevinforeman.nzb360&hl=en_US)
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------
|
||||
|
||||
by nzb360dev
|
||||
|
||||

|
||||
|
||||
[API Show and Tell](https://discord.com/channels/216281096667529216/1375651142704566282)
|
||||
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
Show off your project or see them all in action on our Discord channel!
|
||||
|
||||
Get Involved
|
||||
------------
|
||||
|
||||
Whether you’re a developer looking to contribute, or a user eager to see your most-wanted features come to life, the new Unraid API Feature Bounty Program is your chance to help shape the future of Unraid. The Unraid API is open and the bounties are live!
|
||||
|
||||
* #### [Feature Bounty Program](https://unraid.net/feature-bounty)
|
||||
|
||||
Learn More about the Feature Bounty Program
|
||||
|
||||
* #### [Claim Bounties](https://github.com/orgs/unraid/projects/3/views/1)
|
||||
|
||||
Browse the live bounty board
|
||||
|
||||
* #### [API Info](https://docs.unraid.net/API/)
|
||||
|
||||
Read the API Docs
|
||||
|
||||
|
||||

|
||||
|
||||
Pricing
|
||||
-------
|
||||
|
||||
With affordable options starting at just $49, we have a license for everyone.
|
||||
|
||||
[Buy Now](https://account.unraid.net/buy)
|
||||
|
||||

|
||||
|
||||
Try before you buy
|
||||
------------------
|
||||
|
||||
Not sure if Unraid is right for you? Take Unraid for a test drive for 30 days—no credit card required.
|
||||
|
||||
[Free Trial](https://unraid.net/getting-started)
|
||||
@@ -1,259 +0,0 @@
|
||||
[Skip to main content](https://docs.unraid.net/unraid-connect/overview-and-setup#__docusaurus_skipToContent_fallback)
|
||||
|
||||
On this page
|
||||
|
||||
**Unraid Connect** is a cloud-enabled companion designed to enhance your Unraid OS server experience. It makes server management, monitoring, and maintenance easier than ever, bringing cloud convenience directly to your homelab or business setup.
|
||||
|
||||
Unraid Connect works seamlessly with Unraid OS, boosting your server experience without altering its core functions. You can think of Unraid Connect as your remote command center. It expands the capabilities of your Unraid server by providing secure, web-based access and advanced features, no matter where you are.
|
||||
|
||||
With Unraid Connect, you can:
|
||||
|
||||
* Remotely access and manage your Unraid server from any device, anywhere in the world.
|
||||
* Monitor real-time server health and resource usage, including storage, network, and Docker container status.
|
||||
* Perform and schedule secure online flash backups to protect your configuration and licensing information.
|
||||
* Receive notifications about server health, storage status, and critical events.
|
||||
* Use dynamic remote access and server deep linking to navigate to specific management pages or troubleshoot issues quickly.
|
||||
* Manage multiple servers from a single dashboard, making it perfect for users with more than one Unraid system.
|
||||
|
||||
Unraid Connect is more than just an add-on; it's an essential extension of the Unraid platform, designed to maximize the value, security, and convenience of your Unraid OS investment.
|
||||
|
||||
[**Click here to dive in to Unraid Connect!**](https://connect.myunraid.net/)
|
||||
|
||||
Data collection and privacy[](https://docs.unraid.net/unraid-connect/overview-and-setup#data-collection-and-privacy "Direct link to Data collection and privacy")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
**Click to see what data is collected and how we handle it**
|
||||
|
||||
Unraid Connect prioritizes your privacy and transparency. Here’s what you need to know about how we handle your data:
|
||||
|
||||
### What data is collected and why
|
||||
|
||||
When your server connects to Unraid.net, it establishes a secure connection to our infrastructure and transmits only the necessary data required for a seamless experience in the Unraid Connect Dashboard. This includes:
|
||||
|
||||
* Server hostname, description, and icon
|
||||
* Keyfile details and flash GUID
|
||||
* Local access URL and LAN IP (only if a certificate is installed)
|
||||
* Remote access URL and WAN IP (if remote access is turned on)
|
||||
* Installed Unraid version and uptime
|
||||
* Unraid Connect plugin version and unraid-api version/uptime
|
||||
* Array size and usage (only numbers, no file specifics)
|
||||
* Number of Docker containers and VMs installed and running
|
||||
|
||||
We use this data solely to enable Unraid Connect features, such as remote monitoring, management, and notifications. It is not used for advertising or profiling.
|
||||
|
||||
### Data retention policy
|
||||
|
||||
* We only keep the most recent update from your server; no past data is stored.
|
||||
* Data is retained as long as your server is registered and using Unraid Connect.
|
||||
* To delete your data, simply uninstall the plugin and remove any SSL certificates issued through Let's Encrypt.
|
||||
|
||||
### Data sharing
|
||||
|
||||
* Your data is **not shared with third parties** unless it is necessary for Unraid Connect services, such as certificate provisioning through Let's Encrypt.
|
||||
* We do not collect or share any user content, file details, or personal information beyond what is specified above.
|
||||
|
||||
For more details, check out our [Policies](https://unraid.net/policies)
|
||||
page.
|
||||
|
||||
Installation[](https://docs.unraid.net/unraid-connect/overview-and-setup#installation "Direct link to Installation")
|
||||
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Unraid Connect is available as a plugin that requires Unraid OS 6.10 or later. Before you start, make sure your server is connected to the internet and you have the [Community Applications](https://docs.unraid.net/unraid-os/using-unraid-to/run-docker-containers/community-applications/)
|
||||
plugin installed.
|
||||
|
||||
To install Unraid Connect:
|
||||
|
||||
1. Navigate to the **Apps** tab in the Unraid WebGUI.
|
||||
2. Search for **Unraid Connect** and proceed to install the plugin. Wait for the installation to fully complete before closing the dialog.
|
||||
3. In the top right corner of your Unraid WebGUI, click on the Unraid logo and select **Sign In**.
|
||||
|
||||

|
||||
|
||||
4. Sign in with your Unraid.net credentials or create a new account if necessary.
|
||||
5. Follow the on-screen instructions to register your server with Unraid Connect.
|
||||
6. After registration, you can access the [Unraid Connect Dashboard](https://connect.myunraid.net/)
|
||||
for centralized management.
|
||||
|
||||
note
|
||||
|
||||
Unraid Connect requires a myunraid.net certificate for secure remote management and access. To provision a certificate, go to _**Settings → Management Access**_ in the WebGUI and click **Provision** under the Certificate section.
|
||||
|
||||
Dashboard[](https://docs.unraid.net/unraid-connect/overview-and-setup#dashboard "Direct link to Dashboard")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------
|
||||
|
||||
The **Unraid Connect Dashboard** offers a centralized, cloud-based view of all your registered Unraid servers, with features like:
|
||||
|
||||
* **My Servers:** All linked servers appear in a sidebar and as interactive tiles for easy selection.
|
||||
* **Status (at a glance):** Quickly see which servers are online or offline, along with their Unraid OS version, license type, and recent activity.
|
||||
* **Health and alerts:** Visual indicators show server health, notifications, and update status.
|
||||
|
||||
When you click **Details** on a server, you will see:
|
||||
|
||||
* **Online/Offline:** Real-time connectivity status.
|
||||
* **License type:** Starter, Unleashed, or Lifetime.
|
||||
* **Uptime:** Duration the server has been running.
|
||||
* **Unraid OS version:** Current version and update availability.
|
||||
* **Storage:** Total and free space on all arrays and pools.
|
||||
* **Health metrics:** CPU usage, memory usage, and temperature (if supported).
|
||||
* **Notifications:** Hardware/software alerts, warnings, and errors.
|
||||
* **Flash backup:** Status and date of the last successful backup.
|
||||
|
||||
* * *
|
||||
|
||||
Managing your server remotely[](https://docs.unraid.net/unraid-connect/overview-and-setup#managing-your-server-remotely "Direct link to Managing your server remotely")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
tip
|
||||
|
||||
To use all management features, provision a myunraid.net certificate under _**Settings → Management Access**_ on your server.
|
||||
|
||||
With a valid **myunraid.net** certificate, Unraid Connect enables secure, remote server management directly from the Connect web interface.
|
||||
|
||||
Remote management features include:
|
||||
|
||||
* **Remote WebGUI access:** Access the WebGUI from anywhere.
|
||||
* **Array controls:** Start or stop arrays and manage storage pools.
|
||||
* **Docker and VM management:** View, start, stop, and monitor containers and VMs.
|
||||
* **Parity & Scrub:** Launch parity check or ZFS/BTRFS scrub jobs.
|
||||
* **Flash backup:** Trigger and monitor flash device backups to the cloud.
|
||||
* **Diagnostics:** Download a diagnostics zip for support.
|
||||
* **Notifications:** Review and acknowledge system alerts.
|
||||
* **Server controls:** Reboot or shut down your server remotely.
|
||||
* **User management:** Manage Unraid.net account access and registration.
|
||||
|
||||
You can manage multiple servers from any device - phone, tablet, or computer - with a single browser window.
|
||||
|
||||
* * *
|
||||
|
||||
Deep linking[](https://docs.unraid.net/unraid-connect/overview-and-setup#deep-linking "Direct link to Deep linking")
|
||||
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Deep linking in Unraid Connect lets you jump directly to specific sections of your Unraid WebGUI with a single click. Simply click any of the circled link buttons (below) in the Connect interface to go straight to the relevant management page for your server.
|
||||
|
||||

|
||||
|
||||
* * *
|
||||
|
||||
Customization[](https://docs.unraid.net/unraid-connect/overview-and-setup#customization "Direct link to Customization")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Unraid Connect provides a flexible dashboard experience, allowing you to personalize your server view and appearance. The customization options are organized below for easy reference.
|
||||
|
||||
* Change banner image
|
||||
* Rearrange dashboard tiles
|
||||
* Switch themes
|
||||
|
||||
To display your server’s banner image on the Connect dashboard, upload or select a banner image from your WebGUI under _**Settings → Display Settings → Banner**_. This banner will automatically appear in your Connect dashboard for that server.
|
||||
|
||||
You can customize your dashboard layout by dragging and dropping server tiles. In the Connect dashboard, click the hamburger (≡) button on any tile to rearrange its position. This allows you to prioritize the information and the services most important to you.
|
||||
|
||||
Toggle between dark and light mode by clicking the Sun or Moon icon on the far right of the Connect UI. Your theme preference will be instantly applied across the Connect dashboard for a consistent experience.
|
||||
|
||||
* * *
|
||||
|
||||
License management[](https://docs.unraid.net/unraid-connect/overview-and-setup#license-management "Direct link to License management")
|
||||
|
||||
----------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Managing your licenses in Unraid Connect is easy. Under the **My Keys** section, you can:
|
||||
|
||||
* View or reissue a key to a new USB.
|
||||
* Upgrade your license tier directly from the Connect UI.
|
||||
* Download registration key files for backup or transfer.
|
||||
* Review license status and expiration (if applicable).
|
||||
|
||||

|
||||
|
||||
You don’t need to leave the Connect interface to manage or upgrade your licenses.
|
||||
|
||||
* * *
|
||||
|
||||
Language localization[](https://docs.unraid.net/unraid-connect/overview-and-setup#language-localization "Direct link to Language localization")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Unraid Connect supports multiple languages to cater to a global user base. You can change your language preference through the language selector in the Connect interface.
|
||||
|
||||
To change your language preference:
|
||||
|
||||
1. Open the Connect UI.
|
||||
2. Go to the language selector.
|
||||
|
||||

|
||||
|
||||
3. Select your preferred language from the list.
|
||||
|
||||
The interface will update automatically to reflect your selection.
|
||||
|
||||
* * *
|
||||
|
||||
Signing out[](https://docs.unraid.net/unraid-connect/overview-and-setup#signing-out "Direct link to Signing out")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
You can sign out of Unraid Connect anytime from _**Settings → Management Access → Unraid Connect → Account Status**_ by clicking the **Sign Out** button.
|
||||
|
||||
When you sign out:
|
||||
|
||||
* Your server remains listed on the Connect dashboard, but you lose access to remote management features.
|
||||
* Remote access, cloud-based flash backups, and other Unraid Connect features will be disabled for that server.
|
||||
* You can still download your registration keys, but you cannot manage or monitor the server remotely until you sign in again.
|
||||
* Signing out does **not** disconnect your server from the local network or affect local access.
|
||||
|
||||
* * *
|
||||
|
||||
Uninstalling the plugin[](https://docs.unraid.net/unraid-connect/overview-and-setup#uninstalling-the-plugin "Direct link to Uninstalling the plugin")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
When you uninstall the Unraid Connect plugin:
|
||||
|
||||
* All flash backup files will be deactivated and deleted from your local flash drive.
|
||||
* Cloud backups are marked for removal from Unraid servers; they will be retained for 30 days, after which they are permanently purged. For immediate deletion, [disable Flash Backup](https://docs.unraid.net/unraid-connect/automated-flash-backup/)
|
||||
before uninstalling.
|
||||
* Remote access will be disabled. Ensure that you remove any related port forwarding rules from your router.
|
||||
* Your server will be signed out of Unraid.net.
|
||||
|
||||
note
|
||||
|
||||
Uninstalling the plugin does **not** revert your server's URL from `https://yourpersonalhash.unraid.net` to `http://computername`. If you wish to change your access URL, refer to [Disabling SSL for local access](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/securing-your-connection/#disabling-ssl-for-local-access)
|
||||
.
|
||||
|
||||
* * *
|
||||
|
||||
Connection errors[](https://docs.unraid.net/unraid-connect/overview-and-setup#connection-errors "Direct link to Connection errors")
|
||||
|
||||
-------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
If you encounter connection errors in Unraid Connect, [open a terminal](https://docs.unraid.net/unraid-os/system-administration/advanced-tools/command-line-interface/)
|
||||
from the WebGUI and run:
|
||||
|
||||
unraid-api restart
|
||||
|
||||
* [Data collection and privacy](https://docs.unraid.net/unraid-connect/overview-and-setup#data-collection-and-privacy)
|
||||
|
||||
* [Installation](https://docs.unraid.net/unraid-connect/overview-and-setup#installation)
|
||||
|
||||
* [Dashboard](https://docs.unraid.net/unraid-connect/overview-and-setup#dashboard)
|
||||
|
||||
* [Managing your server remotely](https://docs.unraid.net/unraid-connect/overview-and-setup#managing-your-server-remotely)
|
||||
|
||||
* [Deep linking](https://docs.unraid.net/unraid-connect/overview-and-setup#deep-linking)
|
||||
|
||||
* [Customization](https://docs.unraid.net/unraid-connect/overview-and-setup#customization)
|
||||
|
||||
* [License management](https://docs.unraid.net/unraid-connect/overview-and-setup#license-management)
|
||||
|
||||
* [Language localization](https://docs.unraid.net/unraid-connect/overview-and-setup#language-localization)
|
||||
|
||||
* [Signing out](https://docs.unraid.net/unraid-connect/overview-and-setup#signing-out)
|
||||
|
||||
* [Uninstalling the plugin](https://docs.unraid.net/unraid-connect/overview-and-setup#uninstalling-the-plugin)
|
||||
|
||||
* [Connection errors](https://docs.unraid.net/unraid-connect/overview-and-setup#connection-errors)
|
||||
@@ -1,181 +0,0 @@
|
||||
# Remote Access (Unraid Connect)
|
||||
|
||||
> **Source:** [Unraid Documentation - Remote Access](https://docs.unraid.net/unraid-connect/remote-access)
|
||||
> **Scraped:** 2026-02-07 | Raw content for reference purposes
|
||||
|
||||
Unlock secure, browser-based access to your Unraid WebGUI from anywhere with remote access. This feature is ideal for managing your server when you're away from home - no complicated networking or VPN Tunnel setup is required. For more advanced needs, such as connecting to Docker containers or accessing network drives, a VPN Tunnel remains the recommended solution.
|
||||
|
||||
Security reminder
|
||||
|
||||
Before enabling remote access, ensure your root password is strong and unique. Update it on the **Users** page if required. Additionally, keep your Unraid OS updated to the latest version to protect against security vulnerabilities. [Learn more about updating Unraid here](https://docs.unraid.net/unraid-os/system-administration/maintain-and-update/upgrading-unraid/)
|
||||
.
|
||||
|
||||
Remote access through Unraid Connect provides:
|
||||
|
||||
* **Convenience** - Quickly access your server’s management interface from anywhere, using a secure, cloud-managed connection.
|
||||
* **Security** - Dynamic access modes limit exposure by only allowing access to the internet when necessary, which helps reduce risks from automated attacks.
|
||||
* **Simplicity** - No need for manual port forwarding or VPN client setup for basic management tasks.
|
||||
|
||||
tip
|
||||
|
||||
For full network access or advanced use cases, consider setting up [Tailscale](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/tailscale/)
|
||||
or a VPN solution.
|
||||
|
||||
* * *
|
||||
|
||||
Initial setup[](https://docs.unraid.net/unraid-connect/remote-access#initial-setup "Direct link to Initial setup")
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
To enable remote access:
|
||||
|
||||
1. In the Unraid WebGUI, navigate to _**Settings → Management Access**_.
|
||||
2. Check the **HTTPS port** (default: 443). If this port is in use (e.g., by Docker), select an unused port above 1000 (like 3443, 4443, or 5443).
|
||||
3. Click **Apply** if you changed any settings.
|
||||
4. Under **CA-signed certificate file**, click **Provision** to generate a trusted certificate.
|
||||
|
||||
Your Unraid server will be ready to accept secure remote connections via the WebGUI, using the configured port and a trusted certificate.
|
||||
|
||||
* * *
|
||||
|
||||
Choosing a remote access type[](https://docs.unraid.net/unraid-connect/remote-access#choosing-a-remote-access-type "Direct link to Choosing a remote access type")
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Unraid Connect offers two modes:
|
||||
|
||||
* Dynamic remote access
|
||||
* Static remote access
|
||||
|
||||
**Dynamic remote access** provides secure, on-demand access to your WebGUI.
|
||||
|
||||
* **Access is enabled only when you need it.** The WebGUI remains closed to the internet by default, minimizing the attack surface.
|
||||
* **Works with UPnP or manual port forwarding.**
|
||||
* **Automatically opens and closes access** through the Connect dashboard or API, with sessions limited by time for added security.
|
||||
|
||||
**Static remote access** keeps your WebGUI continuously available from the internet.
|
||||
|
||||
* **Server is always accessible from the internet** on the configured port.
|
||||
* **Higher risk:** The WebGUI is exposed to WAN traffic at all times, increasing potential vulnerability.
|
||||
|
||||
| Feature | Dynamic remote access | Static remote access |
|
||||
| --- | --- | --- |
|
||||
| WebGUI open to internet | Only when enabled | Always |
|
||||
| Attack surface | Minimized | Maximized |
|
||||
| Automation | Auto open/close via Connect | Manual setup, always open |
|
||||
| UPnP support | Yes | Yes |
|
||||
| | **Recommended for most** | |
|
||||
|
||||
Dynamic remote access setup[](https://docs.unraid.net/unraid-connect/remote-access#dynamic-remote-access-setup "Direct link to Dynamic remote access setup")
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
To set up dynamic remote access:
|
||||
|
||||
1. In _**Settings → Management Access → Unraid API**_, select a dynamic option from the Remote Access dropdown:
|
||||
|
||||
* **Dynamic - UPnP:** Uses UPnP to open and close a random port automatically (requires UPnP enabled on your router).
|
||||
* **Dynamic - Manual port forward:** Requires you to forward the selected port on your router manually.
|
||||
2. Navigate to [Unraid Connect](https://connect.myunraid.net/)
|
||||
, and go to the management or server details page.
|
||||
|
||||
3. The **Dynamic remote access** card will show a button if your server isn’t currently accessible from your location.
|
||||
|
||||
4. Click the button to enable WAN access. If using UPnP, a new port forward lease is created (typically for 30 minutes) and auto-renewed while active.
|
||||
|
||||
5. The card will display the current status and UPnP state.
|
||||
|
||||
6. After 10 minutes of inactivity - or if you click **Disable remote access** - internet access is automatically revoked. UPnP leases are removed as well.
|
||||
|
||||
|
||||
* * *
|
||||
|
||||
Using UPnP (Universal Plug and Play)[](https://docs.unraid.net/unraid-connect/remote-access#using-upnp-universal-plug-and-play "Direct link to Using UPnP (Universal Plug and Play)")
|
||||
|
||||
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
UPnP automates port forwarding, simplifying remote access without requiring manual router configuration.
|
||||
|
||||
To configure UPnP:
|
||||
|
||||
1. **Enable UPnP on your router.** Ensure that your router supports UPnP and verify that it is enabled in the router settings.
|
||||
|
||||
2. **Enable UPnP in Unraid.** Navigate to _**Settings → Management Access**_ and change **Use UPnP** to **Yes**.
|
||||
|
||||
3. **Select UPnP in Unraid Connect.** On the Unraid Connect settings page, choose the remote access option as UPnP (select either Dynamic or Always On) and then click **Apply**.
|
||||
|
||||
4. **Verify port forwarding (Always On only).** Click the **Check** button. If successful, you'll see the message, "Your Unraid Server is reachable from the Internet."
|
||||
|
||||
For Dynamic forwarding, you need to click **Enable Dynamic Remote Access** in [Unraid Connect](https://connect.myunraid.net/)
|
||||
to allow access.
|
||||
|
||||
|
||||
Troubleshooting
|
||||
|
||||
If the setting changes from UPnP to Manual Port Forward upon reloading, Unraid might not be able to communicate with your router. Double-check that UPnP is enabled and consider updating your router's firmware.
|
||||
|
||||
* * *
|
||||
|
||||
Using manual port forwarding[](https://docs.unraid.net/unraid-connect/remote-access#using-manual-port-forwarding "Direct link to Using manual port forwarding")
|
||||
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Manual port forwarding provides greater control and is compatible with most routers.
|
||||
|
||||
To configure manual port forwarding:
|
||||
|
||||
1. **Choose a WAN port:** Pick a random port number above 1000 (for example, 13856 or 48653), rather than using the default 443.
|
||||
|
||||
2. **Apply settings in Unraid:** Click **Apply** to save the port you selected.
|
||||
|
||||
3. **Configure your router:** Set up a port forwarding rule on your router, directing your chosen WAN port to your server’s HTTPS port. The Unraid interface provides the correct ports and IP address.
|
||||
|
||||
Some routers may require the WAN port and HTTPS port to match. If so, use the same high random number for both.
|
||||
|
||||
4. **Verify port forwarding (Always On only):** Press the **Check** button. If everything is correct, you’ll see “Your Unraid Server is reachable from the Internet.”
|
||||
|
||||
For dynamic forwarding, make sure to click **Enable Dynamic Remote Access** in [Unraid Connect](https://connect.myunraid.net/)
|
||||
to enable access.
|
||||
|
||||
5. **Access your server:** Log in to [Unraid Connect](https://connect.myunraid.net/)
|
||||
and click the **Manage** link to connect to your server remotely.
|
||||
|
||||
|
||||
* * *
|
||||
|
||||
Enabling secure local access[](https://docs.unraid.net/unraid-connect/remote-access#enabling-secure-local-access "Direct link to Enabling secure local access")
|
||||
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Secure local access ensures that all connections to your Unraid WebGUI, even within your home or office network, are encrypted using HTTPS, thereby safeguarding any sensitive information, such as login credentials and configuration data.
|
||||
|
||||
Benefits of secure local access include:
|
||||
|
||||
* **Encryption** - All data exchanged between your browser and the server is protected.
|
||||
* **Consistency** - Use the same secure URL for both local and remote access.
|
||||
* **Compliance** - Adheres to security best practices for protecting administrative interfaces.
|
||||
|
||||
To enable secure local access:
|
||||
|
||||
1. Go to _**Settings → Management Access**_.
|
||||
2. In the **CA-signed certificate** section, check for DNS Rebinding warnings.
|
||||
* If no warnings show, set **Use SSL/TLS** to **Strict**.
|
||||
* If warnings are present, review [DNS Rebinding Protection](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/securing-your-connection/#dns-rebinding-protection)
|
||||
.
|
||||
|
||||
important
|
||||
|
||||
With SSL/TLS set to Strict, client devices must resolve your server’s DNS name. If your Internet connection fails, access to the WebGUI may be lost. See [Accessing your server when DNS is down](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/securing-your-connection/#accessing-your-server-when-dns-is-down)
|
||||
for recovery steps.
|
||||
|
||||
* [Initial setup](https://docs.unraid.net/unraid-connect/remote-access#initial-setup)
|
||||
|
||||
* [Choosing a remote access type](https://docs.unraid.net/unraid-connect/remote-access#choosing-a-remote-access-type)
|
||||
|
||||
* [Dynamic remote access setup](https://docs.unraid.net/unraid-connect/remote-access#dynamic-remote-access-setup)
|
||||
|
||||
* [Using UPnP (Universal Plug and Play)](https://docs.unraid.net/unraid-connect/remote-access#using-upnp-universal-plug-and-play)
|
||||
|
||||
* [Using manual port forwarding](https://docs.unraid.net/unraid-connect/remote-access#using-manual-port-forwarding)
|
||||
|
||||
* [Enabling secure local access](https://docs.unraid.net/unraid-connect/remote-access#enabling-secure-local-access)
|
||||
@@ -1,886 +0,0 @@
|
||||
# Unraid OS 7.0.0 Release Notes
|
||||
|
||||
> **Source:** [Unraid OS Release Notes - 7.0.0](https://docs.unraid.net/unraid-os/release-notes/7.0.0)
|
||||
> **Scraped:** 2026-02-07 | Raw content for reference purposes
|
||||
|
||||
This version of Unraid OS includes significant improvements across all subsystems, while attempting to maintain backward compatibility as much as possible.
|
||||
|
||||
Special thanks to:
|
||||
|
||||
* @bonienl, @dlandon, @ich777, @JorgeB, @SimonF, and @Squid for their direction, support, and development work on this release
|
||||
* @bonienl for merging their **Dynamix File Manager** plugin into the webgui
|
||||
* @Squid for merging their **GUI Search** and **Unlimited Width Plugin** plugins into the webgui
|
||||
* @ludoux (**Proxy Editor** plugin) and @Squid (**Community Applications** plugin) for pioneering the work on http proxy support, of which several ideas have been incorporated into the webgui
|
||||
* @ich777 for maintaining third-party driver plugins, and for the [Tailscale Docker integration](https://docs.unraid.net/unraid-os/release-notes/7.0.0#tailscale-integration)
|
||||
|
||||
* @SimonF for significant new features in the Unraid OS VM Manager
|
||||
* @EDACerton for development of the Tailscale plugin
|
||||
|
||||
View the [contributors to Unraid on GitHub](https://github.com/unraid/webgui/graphs/contributors?from=2023-09-08&to=2025-01-08&type=c)
|
||||
with shoutouts to these community members who have contributed PRs (these are GitHub ids):
|
||||
|
||||
* almightyYantao
|
||||
* baumerdev
|
||||
* Commifreak
|
||||
* desertwitch
|
||||
* dkaser
|
||||
* donbuehl
|
||||
* FunkeCoder23
|
||||
* Garbee
|
||||
* jbtwo
|
||||
* jski
|
||||
* Leseratte10
|
||||
* Mainfrezzer
|
||||
* mtongnz
|
||||
* othyn
|
||||
* serisman
|
||||
* suzukua
|
||||
* thecode
|
||||
|
||||
And sincere thanks to everyone who has requested features, reported bugs, and tested pre-releases!
|
||||
|
||||
Upgrading[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#upgrading "Direct link to Upgrading")
|
||||
|
||||
---------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Known issues[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#known-issues "Direct link to Known issues")
|
||||
|
||||
#### ZFS pools[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#zfs-pools "Direct link to ZFS pools")
|
||||
|
||||
If you are using ZFS pools, please take note of the following:
|
||||
|
||||
* You will see a warning about unsupported features in your existing ZFS pools. This is because the version of ZFS in 7.0 is upgraded vs. 6.12 and contains new features. This warning is harmless, meaning your pool will still function normally. A button will appear letting you upgrade a pool to support the new ZFS features; however, Unraid OS does not make use of these new features, and once upgraded previous versions of Unraid OS will not be able to mount the pool.
|
||||
* Similarly, new pools created in 7.0 will not mount in 6.12 due to ZFS not supporting downgrades. There is no way around this.
|
||||
* If you decide to downgrade from 7.0 to 6.12 any previously existing hybrid pools will not be recognized upon reboot into 6.12. To work around this, first click Tools/New Config in 7.0, preserving all slots, then reboot into 6.12 and your hybrid pools should import correctly.
|
||||
* ZFS spares are not supported in this release. If you have created a hybrid pool in 6.12 which includes spares, please remove the 'spares' vdev before upgrading to v7.0. This will be fixed in a future release.
|
||||
* Currently unable to import TrueNAS pools. This will be fixed in a future release.
|
||||
* If you are using **Docker data-root=directory** on a ZFS volume, see [Add support for overlay2 storage driver](https://docs.unraid.net/unraid-os/release-notes/7.0.0#add-support-for-overlay2-storage-driver)
|
||||
.
|
||||
* We check that VM names do not include characters that are not valid for ZFS. Existing VMs are not modified but will throw an error and disable update if invalid characters are found.
|
||||
|
||||
#### General pool issues[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#general-pool-issues "Direct link to General pool issues")
|
||||
|
||||
If your existing pools fail to import with _Wrong Pool State, invalid expansion_ or _Wrong pool State. Too many wrong or missing devices_, see this [forum post](https://forums.unraid.net/topic/184435-unraid-os-version-700-available/#findComment-1508012)
|
||||
.
|
||||
|
||||
#### Drive spindown issues[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#drive-spindown-issues "Direct link to Drive spindown issues")
|
||||
|
||||
Drives may not spin down when connected to older Marvell drive controllers that use the sata\_mv driver (i.e. Supermicro SASLP and SAS2LP) or to older Intel controllers (i.e. ICH7-ICH10). This may be resolved by a future kernel update.
|
||||
|
||||
#### Excessive flash drive activity slows the system down[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#excessive-flash-drive-activity-slows-the-system-down "Direct link to Excessive flash drive activity slows the system down")
|
||||
|
||||
If the system is running slowly, check the Main page and see if it shows significant continuous reads from the flash drive during normal operation. If so, the system may be experiencing sufficient memory pressure to push the OS out of RAM and cause it to be re-read from the flash drive. From the web terminal type:
|
||||
|
||||
touch /boot/config/fastusr
|
||||
|
||||
and then reboot. This will use around 500 MB of RAM to ensure the OS files always stay in memory. Please let us know if this helps.
|
||||
|
||||
#### New Windows changes may result in loss of access to Public shares[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#new-windows-changes-may-result-in-loss-of-access-to-public-shares "Direct link to New Windows changes may result in loss of access to Public shares")
|
||||
|
||||
Due to recent security changes in Windows 11 24H2, "guest" access of Unraid public shares may not work. The easiest way around this is to create a user in Unraid with the same name as the Windows account you are using to connect. If the Unraid user password is not the same as the Windows account password, Windows will prompt for credentials.
|
||||
|
||||
If you are using a Microsoft account, it may be better to create a user in Unraid with a simple username, set a password, then in Windows go to _**Control Panel → Credential Manager → Windows credentials → Add a Windows Credential**_ and add the correct Unraid server name and credentials.
|
||||
|
||||
Alternately you can [re-enable Windows guest fallback](https://techcommunity.microsoft.com/blog/filecab/accessing-a-third-party-nas-with-smb-in-windows-11-24h2-may-fail/4154300)
|
||||
(not recommended).
|
||||
|
||||
#### Problems due to Realtek network cards[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#problems-due-to-realtek-network-cards "Direct link to Problems due to Realtek network cards")
|
||||
|
||||
There have been multiple reports of issues with the Realtek driver plugin after upgrading to recent kernels. You may want to preemptively uninstall it before upgrading, or remove it afterwards if you have networking issues.
|
||||
|
||||
#### A virtual NIC is being assigned to eth0 on certain systems[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#a-virtual-nic-is-being-assigned-to-eth0-on-certain-systems "Direct link to A virtual NIC is being assigned to eth0 on certain systems")
|
||||
|
||||
On some systems with IPMI KVM, a virtual NIC is being assigned to eth0 instead of the expected NIC. See this [forum post](https://forums.unraid.net/bug-reports/stable-releases/61214-no-network-after-updating-eth0-assigned-to-virtual-usb-nic-cdc-ethernet-device-with-169-ip-instead-of-mellanox-10gbe-nic-r3407/)
|
||||
for options.
|
||||
|
||||
#### Issues using Docker custom networks[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#issues-using-docker-custom-networks "Direct link to Issues using Docker custom networks")
|
||||
|
||||
If certain custom Docker networks are not available for use by your Docker containers, navigate to _**Settings → Docker**_ and fix the CIDR definitions for the subnet mask and DHCP pool on those custom networks. The underlying systems have gotten more strict and invalid CIDR definitions which worked in earlier releases no longer work.
|
||||
|
||||
### Rolling back[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#rolling-back "Direct link to Rolling back")
|
||||
|
||||
See the warnings under **Known Issues** above.
|
||||
|
||||
The Dynamix File Manager, GUI Search, and Unlimited Width Plugin plugins are now built into Unraid. If you rollback to an earlier version you will need to reinstall those plugins to retain their functionality.
|
||||
|
||||
If you disabled the unRAID array, we recommend enabling it again before rolling back.
|
||||
|
||||
If you previously had Outgoing Proxies set up using the Proxy Editor plugin or some other mechanism, you will need to re-enable that mechanism after rolling back.
|
||||
|
||||
If you roll back after enabling the [overlay2 storage driver](https://docs.unraid.net/unraid-os/release-notes/7.0.0#add-support-for-overlay2-storage-driver)
|
||||
you will need to delete the Docker directory and let Docker re-download the image layers.
|
||||
|
||||
If you roll back after installing [Tailscale in a Docker container](https://docs.unraid.net/unraid-os/release-notes/7.0.0#tailscale-integration)
|
||||
, you will need to edit the container, make a dummy change, and **Apply** to rebuild the container without the Tailscale integration.
|
||||
|
||||
After rolling back, make a dummy change to each WireGuard config to get the settings appropriate for that version of Unraid.
|
||||
|
||||
If rolling back earlier than 6.12.14, also see the [6.12.14 release notes](https://docs.unraid.net/unraid-os/release-notes/6.12.14/#rolling-back)
|
||||
.
|
||||
|
||||
Storage[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#storage "Direct link to Storage")
|
||||
|
||||
---------------------------------------------------------------------------------------------------
|
||||
|
||||
### unRAID array optional[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#unraid-array-optional "Direct link to unRAID array optional")
|
||||
|
||||
You can now set the number of unRAID array slots to 'none'. This will allow the array to Start without any devices assigned to the unRAID array itself.
|
||||
|
||||
If you are running an all-SSD/NVMe server, we recommend assigning all devices to one or more ZFS/BTRFS pools, since Trim/Discard is not supported with unRAID array devices.
|
||||
|
||||
To unassign the unRAID array from an existing server, first unassign all Array slots on the Main page, and then set the Slots to 'none'.
|
||||
|
||||
For new installs, the default number of slots to reserve for the unRAID array is now 'none'.
|
||||
|
||||
### Share secondary storage may be assigned to a pool[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#share-secondary-storage-may-be-assigned-to-a-pool "Direct link to Share secondary storage may be assigned to a pool")
|
||||
|
||||
Shares can now be configured with pools for both primary and secondary storage, and mover will move files between those pools. As a result of this change, the maximum number of supported pools is now 34 (previously 35).
|
||||
|
||||
### ReiserFS file system option has been disabled[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#reiserfs-file-system-option-has-been-disabled "Direct link to ReiserFS file system option has been disabled")
|
||||
|
||||
Since ReiserFS is scheduled to be removed from the Linux kernel, the option to format a device with ReiserFS has also been disabled. You may use this mover function to empty an array disk prior to reformatting with another file system, see below. We will add a webGUI button for this in a future release.
|
||||
|
||||
### Using 'mover' to empty an array disk[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#using-mover-to-empty-an-array-disk "Direct link to Using 'mover' to empty an array disk")
|
||||
|
||||
Removed in Unraid 7.2.1
|
||||
|
||||
This command line option was removed in Unraid 7.2.1. On newer releases, use the WebGUI method instead. See [Converting to a new file system type](https://docs.unraid.net/unraid-os/using-unraid-to/manage-storage/file-systems/#converting-to-a-new-file-system-type)
|
||||
for details.
|
||||
|
||||
Mover can now be used to empty an array disk. With the array started, run this at a web terminal:
|
||||
|
||||
mover start -e diskN |& logger & # where N is [1..28]
|
||||
|
||||
Mover will look at each top-level directory (share) and then move files one-by-one to other disks in the array, following the usual config settings (include/exclude, split-level, alloc method). Move targets are restricted to just the unRAID array.
|
||||
|
||||
Watch the syslog for status. When the mover process ends, the syslog will show a list of files which could not be moved:
|
||||
|
||||
* maybe file was in-use
|
||||
* maybe file is at the top-level of /mnt/diskN
|
||||
* maybe we ran out of space
|
||||
|
||||
### Predefined shares handling[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#predefined-shares-handling "Direct link to Predefined shares handling")
|
||||
|
||||
The Unraid OS Docker Manager is configured by default to use these predefined shares:
|
||||
|
||||
* system - used to store Docker image layers in a loopback image stored in system/docker.
|
||||
* appdata - used by Docker applications to store application data.
|
||||
|
||||
The Unraid OS VM Manager is configured by default to use these predefined shares:
|
||||
|
||||
* system - used to store libvirt loopback image stored in system/libvirt
|
||||
* domains - used to store VM vdisk images
|
||||
* isos - used to store ISO boot images
|
||||
|
||||
When either Docker or VMs are enabled, the required predefined shares are created if necessary according to these rules:
|
||||
|
||||
* if a pool named 'cache' is present, predefined shares are created with 'cache' as the Primary storage with no Secondary storage.
|
||||
* if no pool named 'cache' is present, the predefined shares are created with the first alphabetically present pool as Primary with no Secondary storage.
|
||||
* if no pools are present, the predefined shares are created on the unRAID array as Primary with no Secondary storage.
|
||||
|
||||
### ZFS implementation[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#zfs-implementation "Direct link to ZFS implementation")
|
||||
|
||||
* Support Hybrid ZFS pools aka subpools (except 'spares')
|
||||
* Support recovery from multiple drive failures in a ZFS pool with sufficient protection
|
||||
* Support LUKS encryption on ZFS pools and drives
|
||||
* Set reasonable default profiles for new ZFS pools and subpools
|
||||
* Support upgrading ZFS pools when viewing the pool status. Note: after upgrading, the volume may not be mountable in previous versions of Unraid
|
||||
|
||||
### Allocation profiles for btrfs, zfs, and zfs subpools[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#allocation-profiles-for-btrfs-zfs-and-zfs-subpools "Direct link to Allocation profiles for btrfs, zfs, and zfs subpools")
|
||||
|
||||
When a btrfs or zfs pool/subpool is created, the default storage allocation is determined by the number of slots (devices) initially assigned to the pool:
|
||||
|
||||
* for zfs main (root) pool:
|
||||
|
||||
* slots == 1 => single
|
||||
* slots == 2 => mirror (1 group of 2 devices)
|
||||
* slots >= 3 => raidz1 (1 group of 'slots' devices)
|
||||
* for zfs special, logs, and dedup subpools:
|
||||
|
||||
* slots == 1 => single
|
||||
* slots%2 == 0 => mirror (slots/2 groups of 2 devices)
|
||||
* slots%3 == 0 => mirror (slots/3 groups of 3 devices)
|
||||
* otherwise => stripe (1 group of 'slots' devices)
|
||||
* for zfs cache and spare subpools:
|
||||
|
||||
* slots == 1 => single
|
||||
* slots >= 2 => stripe (1 group of 'slots' devices)
|
||||
* for btrfs pools:
|
||||
|
||||
* slots == 1 => single
|
||||
* slots >= 2 => raid1 (i.e., what btrfs calls "raid1")
|
||||
|
||||
### Pool considerations[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#pool-considerations "Direct link to Pool considerations")
|
||||
|
||||
When adding devices to (expanding) a single-slot pool, these rules apply:
|
||||
|
||||
For btrfs: adding one or more devices to a single-slot pool will result in converting the pool to raid1 (that is, what btrfs defines as raid1). Adding any number of devices to an existing multiple-slot btrfs pool increases the storage capacity of the pool and does not change the storage profile.
|
||||
|
||||
For zfs: adding one, two, or three devices to a single-slot pool will result in converting the pool to 2-way, 3-way, or 4-way mirror. Adding a single device to an existing 2-way or 3-way mirror converts the pool to a 3-way or 4-way mirror.
|
||||
|
||||
Changing the file system type of a pool:
|
||||
|
||||
For all single-slot pools, the file system type can be changed when array is Stopped.
|
||||
|
||||
For btrfs/zfs multi-slot pools, the file system type cannot be changed. To repurpose the devices you must click the Erase pool button.
|
||||
|
||||
### Other features[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-features "Direct link to Other features")
|
||||
|
||||
* Add Spin up/down devices of a pool in parallel
|
||||
* Add "Delete Pool" button, which unassigns all devices of a pool and then removes the pool. The devices themselves are not modified. This is useful when physically removing devices from a server.
|
||||
* Add ability to change encryption phrase/keyfile for LUKS encrypted disks
|
||||
* Introduce 'config/share.cfg' variable 'shareNOFILE' which sets maximum open file descriptors for shfs process (see the Known Issues)
|
||||
|
||||
VM Manager[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#vm-manager "Direct link to VM Manager")
|
||||
|
||||
------------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Improvements[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#improvements "Direct link to Improvements")
|
||||
|
||||
Added support for VM clones, snapshots, and evdev passthru.
|
||||
|
||||
The VM editor now has a new read-only inline XML mode for advanced users, making it clear how the GUI choices affect the underlying XML used by the VM.
|
||||
|
||||
Big thanks to @SimonF for his ongoing enhancements to VMs.
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-changes "Direct link to Other changes")
|
||||
|
||||
* **VM Tab**
|
||||
* Show all graphics cards and IP addresses assigned to VMs
|
||||
* noVNC version: 1.5
|
||||
* **VM Manager Settings**
|
||||
* Added VM autostart disable option
|
||||
* **Add/edit VM template**
|
||||
* Added "inline xml view" option
|
||||
* Support user-created VM templates
|
||||
* Add qemu ppc64 target
|
||||
* Add qemu:override support
|
||||
* Add "QEMU command-line passthrough" feature
|
||||
* Add VM multifunction support, including "PCI Other"
|
||||
* VM template enhancements for Windows VMs, including hypervclock support
|
||||
* Add "migratable" on/off option for emulated CPU
|
||||
* Add offset and timer support
|
||||
* Add no keymap option and set Virtual GPU default keyboard to use it
|
||||
* Add nogpu option
|
||||
* Add SR-IOV support for Intel iGPU
|
||||
* Add storage override to specify where images are created at add VM
|
||||
* Add SSD flag for vdisks
|
||||
* Add Unmap Support
|
||||
* Check that VM name does not include characters that are not valid for ZFS.
|
||||
* **Dashboard**
|
||||
* Add VM usage statistics to the dashboard, enable on _**Settings → VM Manager → Show VM Usage**_
|
||||
|
||||
Docker[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#docker "Direct link to Docker")
|
||||
|
||||
------------------------------------------------------------------------------------------------
|
||||
|
||||
### Docker fork bomb prevention[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#docker-fork-bomb-prevention "Direct link to Docker fork bomb prevention")
|
||||
|
||||
To prevent "Docker fork bombs" we introduced a new setting, _**Settings → Docker → Docker PID Limit**_, which specifies the maximum number of Process ID's which any container may have active (with default 2048).
|
||||
|
||||
If you have a container that requires more PID's you may either increase this setting or you may override for a specific container by adding, for example, `--pids-limit 3000` to the Docker template _Extra Parameters_ setting.
|
||||
|
||||
### Add support for overlay2 storage driver[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#add-support-for-overlay2-storage-driver "Direct link to Add support for overlay2 storage driver")
|
||||
|
||||
If you are using **Docker data-root=directory** on a ZFS volume, we recommend that you navigate to _**Settings → Docker**_ and switch the **Docker storage driver** to **overlay2**, then delete the directory contents and let Docker re-download the image layers. The legacy **native** setting causes significant stability issues on ZFS volumes.
|
||||
|
||||
If retaining the ability to downgrade to earlier releases is important, then switch to **Docker data-root=xfs vDisk** instead.
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-changes-1 "Direct link to Other changes")
|
||||
|
||||
* See [Tailscale integration](https://docs.unraid.net/unraid-os/release-notes/7.0.0#tailscale-integration)
|
||||
|
||||
* Allow custom registry with a port specification
|
||||
* Use "lazy unmount" of the docker image to prevent blocking array stop
|
||||
* Updated to address multiple security issues (CVE-2024-21626, CVE-2024-24557)
|
||||
* Docker Manager:
|
||||
* Allow users to select Container networks in the WebUI
|
||||
* Correctly identify/show non dockerman Managed containers
|
||||
* rc.docker:
|
||||
* Only stop Unraid managed containers
|
||||
* Honor restart policy from 3rd party containers
|
||||
* Set MTU of Docker Wireguard bridge to match Wireguard default MTU
|
||||
|
||||
Networking[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#networking "Direct link to Networking")
|
||||
|
||||
------------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Tailscale integration[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#tailscale-integration "Direct link to Tailscale integration")
|
||||
|
||||
Unraid OS supports [Tailscale](https://tailscale.com/)
|
||||
through the use of a plugin created by Community Developer EDACerton. When this plugin is installed, Tailscale certificates are supported for https webGUI access, and the Tailnet URLs will be displayed on the _**Settings → Management Access**_ page.
|
||||
|
||||
And in Unraid natively, you can optionally install Tailscale in almost any Docker container, giving you the ability to share containers with specific people, access them using valid https certificates, and give them alternate routes to the Internet via Exit Nodes.
|
||||
|
||||
For more details see [the docs](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/tailscale/)
|
||||
|
||||
### Support iframing the webGUI[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#support-iframing-the-webgui "Direct link to Support iframing the webGUI")
|
||||
|
||||
Added "Content-Security-Policy frame-ancestors" support to automatically allow the webGUI to be iframed by domains it has certificates for. It isn't exactly supported, but additional customization is possible by using a script to modify NGINX\_CUSTOMFA in `/etc/defaults/nginx`
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-changes-2 "Direct link to Other changes")
|
||||
|
||||
* Upgraded to OpenSSL 3
|
||||
* Allow ALL IPv4/IPv6 addresses as listener. This solves the issue when IPv4 or IPv6 addresses change dynamically
|
||||
* Samba:
|
||||
* Add ipv6 listening address only when NetBIOS is disabled
|
||||
* Fix MacOS unable to write 'flash' share and restore Time Machine compatibility (fruit changes)
|
||||
* The VPN manager now adds all interfaces to WireGuard tunnels, make a dummy change to the tunnel after upgrading or changing network settings to update WireGuard tunnel configs.
|
||||
|
||||
webGUI[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#webgui "Direct link to webGUI")
|
||||
|
||||
------------------------------------------------------------------------------------------------
|
||||
|
||||
### Integrated Dynamix File Manager plugin[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#integrated-dynamix-file-manager-plugin "Direct link to Integrated Dynamix File Manager plugin")
|
||||
|
||||
Click the file manager icon and navigate through your directory structure with the ability to perform common operations such as copy, move, delete, and rename files and directories.
|
||||
|
||||
### Integrated GUI Search plugin[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#integrated-gui-search-plugin "Direct link to Integrated GUI Search plugin")
|
||||
|
||||
Click the search icon on the Menu bar and type the name of the setting you are looking for.
|
||||
|
||||
### Outgoing Proxy Manager[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#outgoing-proxy-manager "Direct link to Outgoing Proxy Manager")
|
||||
|
||||
If you previously used the Proxy Editor plugin or had an outgoing proxy setup for CA, those will automatically be removed/imported. You can then adjust them on _**Settings → Outgoing Proxy Manager**_.
|
||||
|
||||
For more details, see the [manual](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/secure-your-outgoing-comms/)
|
||||
.
|
||||
|
||||
Note: this feature is completely unrelated to any reverse proxies you may be using.
|
||||
|
||||
### Notification Agents[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#notification-agents "Direct link to Notification Agents")
|
||||
|
||||
Notification agent definitions are now stored as individual XML files, making it easier to add notification agents via plugins.
|
||||
|
||||
See this [sample plugin](https://github.com/Squidly271/Wxwork-sample)
|
||||
by @Squid
|
||||
|
||||
* fix: Agent notifications do not work if there is a problem with email notifications
|
||||
|
||||
### NTP Configuration[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#ntp-configuration "Direct link to NTP Configuration")
|
||||
|
||||
For new installs, a single default NTP server is set to 'time.google.com'.
|
||||
|
||||
If your server is using our previous NTP defaults of time1.google.com, time2.google.com etc, you may notice some confusing NTP-related messages in your syslog. To avoid this, consider changing to our new defaults: navigate to _**Settings → Date & Time**_ and configure **NTP server 1** to be time.google.com, leaving all the others blank.
|
||||
|
||||
Of course, you are welcome to use any time servers you prefer, this is just to let you know that we have tweaked our defaults.
|
||||
|
||||
### NFS Shares[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#nfs-shares "Direct link to NFS Shares")
|
||||
|
||||
We have added a few new settings to help resolve issues with NFS shares. On _**Settings → Global Share Settings**_ you can adjust the number of fuse file descriptors and on _**Settings → NFS**_ you can adjust the NFS protocol version and number of threads it uses. See the inline help for details.
|
||||
|
||||
* Added support for NFS 4.1 and 4.2, and permit NFSv4 mounts by default
|
||||
* Add a text box to configure multi-line NFS rules
|
||||
* Bug fix: nfsd doesn't restart properly
|
||||
|
||||
### Dashboard[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#dashboard "Direct link to Dashboard")
|
||||
|
||||
* Add server date and time to the Dashboard; click the time to edit related settings
|
||||
* Rework the **System** tile to clarify what is being shown, including tooltips
|
||||
* Show useful content when dashboard tiles are minimized
|
||||
* Show Docker RAM usage on Dashboard
|
||||
* Add Docker RAM usage to the Dashboard
|
||||
* Rename 'Services' to 'System'
|
||||
* Fix memory leak on the Dashboard, VM Manager and Docker Manager pages
|
||||
|
||||
### SMART improvements[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#smart-improvements "Direct link to SMART improvements")
|
||||
|
||||
* Display KB/MB/GB/TB written in SMART Attributes for SSDs
|
||||
* Add 'SSD endurance remaining' SMART Attribute.
|
||||
|
||||
### Diagnostics[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#diagnostics "Direct link to Diagnostics")
|
||||
|
||||
* Add gpujson from gpu\_statistics to diagnostics
|
||||
* Improved anonymization of LXC logs
|
||||
* If the FCP plugin is installed, run scan during diagnostics
|
||||
* Add phplog to identify PHP errors
|
||||
* Improved anonymization of IPv6 addresses
|
||||
* Removed ps.txt because it exposed passwords in the process list
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-changes-3 "Direct link to Other changes")
|
||||
|
||||
* Support different warning/critical temperature thresholds for HDD/SSD/NVMe drives. NVMe thresholds are set automatically by the drive itself, see _**Settings → Disk Settings**_ to set the thresholds for HDDs and SSDs. All can still be overridden for individual drives.
|
||||
* Add _**Settings → Local Console Settings**_ page with options for keyboard layout, screen blank time, and persistent Bash history
|
||||
* Add _**Settings → Power Mode**_ to optimize the system for power efficiency, balanced, or performance
|
||||
* Hover over an entry on **Tools** and **Settings** to favorite an item, and quickly get back to it on the new top-level **Favorites** page. Or disable Favorites functionality on _**Settings → Display Settings**_.
|
||||
* Enhanced shutdown/restart screen showing more details of the process
|
||||
* Simplify notifications by removing submenus - View, History, and Acknowledge now apply to all notification types
|
||||
* Move date & time settings from **Display Settings** to _**Settings → Date & Time Settings**_
|
||||
* _**Settings → Display settings**_: new setting "width" to take advantage of larger screens
|
||||
* Optionally display NVMe power usage; see _**Settings → Disk Settings**_
|
||||
* Web component enhancements – downgrades, updates, and registration
|
||||
* Prevent formatting new drives as ReiserFS
|
||||
* Use atomic writes for updates of config files
|
||||
* ZFS pool settings changes:
|
||||
* Create meaningful ZFS subpool descriptions
|
||||
* Change ZFS profile text 'raid0' to 'stripe'
|
||||
* Add additional USB device passthrough smartmontools options to webgui (thanks to GitHub user jski)
|
||||
* UPS Settings page (thanks to @othyn):
|
||||
* Add the ability to set a manual UPS capacity override.
|
||||
* UserEdit: in addition to Ed25519, FIDO/U2F Ed25519, and RSA, support SSH key types DSA, ECDSA, and FIDO/U2F ECDSA
|
||||
* OpenTerminal: use shell defined for root user in /etc/passwd file
|
||||
* Always display the "delete share" option, but disable it when the share is not empty
|
||||
|
||||
Misc[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#misc "Direct link to Misc")
|
||||
|
||||
------------------------------------------------------------------------------------------
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#other-changes-4 "Direct link to Other changes")
|
||||
|
||||
* Replace very old 'memtest' with Memtest86+ version 6.20
|
||||
* There are also [Boot Options](https://github.com/memtest86plus/memtest86plus#boot-options)
|
||||
available
|
||||
* Remove support for legacy unraid.net certs
|
||||
* Remove "UpdateDNS" functionality since no longer using legacy non-wildcard 'unraid.net' SSL certs
|
||||
* Strip proxy info and '&' from go script
|
||||
* passwd file handling correction
|
||||
* When avahidaemon running, add name.local to hosts file
|
||||
* Remove keys.lime-technology.com from hosts file
|
||||
* rc.S: remove wsync from XFS mount to prevent WebGUI from freezing during heavy I/O on /boot
|
||||
* make\_bootable\_linux: version 1.4
|
||||
* detect if mtools is installed
|
||||
* ntp.conf: set 'logconfig' to ignore LOG\_INFO
|
||||
* Speed things up: use AVAHI reload instead of restart
|
||||
* Linux kernel: force all buggy Seagate external USB enclosures to bind to usb-storage instead of UAS driver
|
||||
* Startup improvements in rc.S script:
|
||||
* Automatically repair boot sector backup
|
||||
* Explicitly unmount all file systems if cannot continue boot
|
||||
* Detect bad root value in syslinux.cfg
|
||||
* reboot should not invoke shutdown
|
||||
* Clean up empty cgroups
|
||||
* Samba smb.conf: set "nmbd bind explicit broadcast = no" if NetBIOS enabled
|
||||
* Add fastcgi\_path\_info to default nginx configuration
|
||||
* Ensure calls to pgrep or killall are restricted to the current namespace
|
||||
* (Advanced) Added ability to apply custom udev rules from `/boot/config/udev/` upon boot
|
||||
* Bug fix: Correct handling of empty Trial.key when download fails
|
||||
* Bug fix: Fix PHP warning for UPS status
|
||||
* Create meaningful /etc/os-release file
|
||||
* Misc translation fixes
|
||||
* Bug fix: JavaScript console logging functionality restored
|
||||
* Clicking Unraid version number loads release notes from Unraid Docs website
|
||||
|
||||
Linux kernel[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#linux-kernel "Direct link to Linux kernel")
|
||||
|
||||
------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
* version 6.6.68
|
||||
* CONFIG\_MISC\_RTSX\_PCI: Realtek PCI-E card reader
|
||||
* CONFIG\_MISC\_RTSX\_USB: Realtek USB card reader
|
||||
* CONFIG\_DRM\_XE: Intel Xe Graphics
|
||||
* CONFIG\_DRM\_XE\_DISPLAY: Enable display support
|
||||
* CONFIG\_AUDIT: Auditing support
|
||||
* CONFIG\_USB\_SERIAL\_OPTION: USB driver for GSM and CDMA modems
|
||||
* CONFIG\_USB\_SERIAL\_SIMPLE: USB Serial Simple Driver
|
||||
* CONFIG\_USB\_UAS: USB Attached SCSI
|
||||
* CONFIG\_NFS\_V4\_1: NFS client support for NFSv4.1
|
||||
* CONFIG\_NFS\_V4\_1\_MIGRATION: NFSv4.1 client support for migration
|
||||
* CONFIG\_NFS\_V4\_2: NFS client support for NFSv4.2
|
||||
* CONFIG\_NFS\_V4\_2\_READ\_PLUS: NFS: Enable support for the NFSv4.2 READ\_PLUS operation
|
||||
* CONFIG\_NFSD\_V4\_2\_INTER\_SSC: NFSv4.2 inter server to server COPY
|
||||
* CONFIG\_USB\_NET\_CDC\_EEM: CDC EEM support
|
||||
* CONFIG\_USB\_NET\_CDC\_NCM: CDC NCM support
|
||||
* CONFIG\_USB\_SERIAL\_XR: USB MaxLinear/Exar USB to Serial driver
|
||||
* CONFIG\_CAN: CAN bus subsystem support
|
||||
* CONFIG\_CAN\_NETLINK: CAN device drivers with Netlink support
|
||||
* CONFIG\_CAN\_GS\_USB: Geschwister Schneider UG and candleLight compatible interfaces
|
||||
* CONFIG\_SCSI\_LPFC: Emulex LightPulse Fibre Channel Support
|
||||
* CONFIG\_DRM\_VIRTIO\_GPU: Virtio GPU driver
|
||||
* CONFIG\_DRM\_VIRTIO\_GPU\_KMS: Virtio GPU driver modesetting support
|
||||
* CONFIG\_LEDS\_TRIGGERS: LED Trigger support
|
||||
* CONFIG\_LEDS\_TRIGGER\_ONESHOT: LED One-shot Trigger
|
||||
* CONFIG\_LEDS\_TRIGGER\_NETDEV: LED Netdev Trigger
|
||||
* CONFIG\_QED: QLogic QED 25/40/100Gb core driver
|
||||
* CONFIG\_QED\_SRIOV: QLogic QED 25/40/100Gb SR-IOV support
|
||||
* CONFIG\_QEDE: QLogic QED 25/40/100Gb Ethernet NIC
|
||||
* CONFIG\_SCSI\_UFSHCD: Universal Flash Storage Controller
|
||||
* CONFIG\_SCSI\_UFS\_BSG: Universal Flash Storage BSG device node
|
||||
* CONFIG\_SCSI\_UFS\_HWMON: UFS Temperature Notification
|
||||
* CONFIG\_SCSI\_UFSHCD\_PCI: PCI bus based UFS Controller support
|
||||
* CONFIG\_SCSI\_UFS\_DWC\_TC\_PCI: DesignWare pci support using a G210 Test Chip
|
||||
* CONFIG\_SCSI\_UFSHCD\_PLATFORM: Platform bus based UFS Controller support
|
||||
* CONFIG\_SCSI\_UFS\_CDNS\_PLATFORM: Cadence UFS Controller platform driver
|
||||
* CONFIG\_SCSI\_QLA\_FC: QLogic QLA2XXX Fibre Channel Support
|
||||
* CONFIG\_LIQUIDIO: Cavium LiquidIO support
|
||||
* CONFIG\_LIQUIDIO\_VF: Cavium LiquidIO VF support
|
||||
* CONFIG\_NTFS\_FS: NTFS file system support \[removed - this is the old read-only vfs module\]
|
||||
* CONFIG\_NTFS3\_FS: NTFS Read-Write file system support
|
||||
* CONFIG\_NTFS3\_LZX\_XPRESS: activate support of external compressions lzx/xpress
|
||||
* CONFIG\_NTFS3\_FS\_POSIX\_ACL: NTFS POSIX Access Control Lists
|
||||
* CONFIG\_UHID: User-space I/O driver support for HID subsystem
|
||||
* md/unraid: version 2.9.33
|
||||
* fix regression: empty slots before first occupied slot returns NO\_DEVICES
|
||||
* fix handling of device failure during rebuild/sync
|
||||
* removed XEN support
|
||||
|
||||
Base distro[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#base-distro "Direct link to Base distro")
|
||||
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
|
||||
* aaa\_base: version 15.1
|
||||
* aaa\_glibc-solibs: version 2.40
|
||||
* aaa\_libraries: version 15.1
|
||||
* acl: version 2.3.2
|
||||
* acpid: version 2.0.34
|
||||
* adwaita-icon-theme: version 47.0
|
||||
* apcupsd: version 3.14.14
|
||||
* appres: version 1.0.7
|
||||
* at: version 3.2.5
|
||||
* at-spi2-atk: version 2.38.0
|
||||
* at-spi2-core: version 2.54.0
|
||||
* atk: version 2.38.0
|
||||
* attr: version 2.5.2
|
||||
* avahi: version 0.8
|
||||
* bash: version 5.2.037
|
||||
* bash-completion: version 2.16.0
|
||||
* beep: version 1.3
|
||||
* bin: version 11.1
|
||||
* bind: version 9.20.4
|
||||
* bluez-firmware: version 1.2
|
||||
* bridge-utils: version 1.7.1
|
||||
* brotli: version 1.1.0
|
||||
* btrfs-progs: version 6.12
|
||||
* bzip2: version 1.0.8
|
||||
* ca-certificates: version 20241120
|
||||
* cairo: version 1.18.2
|
||||
* celt051: version 0.5.1.3
|
||||
* cifs-utils: version 7.1
|
||||
* coreutils: version 9.5
|
||||
* cpio: version 2.15
|
||||
* cpufrequtils: version 008
|
||||
* cracklib: version 2.10.3
|
||||
* cryptsetup: version 2.7.5
|
||||
* curl: version 8.11.1
|
||||
* cyrus-sasl: version 2.1.28
|
||||
* db48: version 4.8.30
|
||||
* dbus: version 1.16.0
|
||||
* dbus-glib: version 0.112
|
||||
* dcron: version 4.5
|
||||
* dejavu-fonts-ttf: version 2.37
|
||||
* devs: version 2.3.1
|
||||
* dhcpcd: version 10.0.10
|
||||
* diffutils: version 3.10
|
||||
* dmidecode: version 3.6
|
||||
* dnsmasq: version 2.90
|
||||
* docker: version 27.0.3
|
||||
* dosfstools: version 4.2
|
||||
* e2fsprogs: version 1.47.1
|
||||
* ebtables: version 2.0.11
|
||||
* editres: version 1.0.9
|
||||
* elfutils: version 0.192
|
||||
* elogind: version 255.5
|
||||
* elvis: version 2.2\_0
|
||||
* encodings: version 1.1.0
|
||||
* etc: version 15.1
|
||||
* ethtool: version 5.19
|
||||
* eudev: version 3.2.14
|
||||
* file: version 5.46
|
||||
* findutils: version 4.10.0
|
||||
* flex: version 2.6.4
|
||||
* floppy: version 5.5
|
||||
* fluxbox: version 1.3.7
|
||||
* fontconfig: version 2.15.0
|
||||
* freeglut: version 3.6.0
|
||||
* freetype: version 2.13.3
|
||||
* fribidi: version 1.0.16
|
||||
* fuse3: version 3.16.2
|
||||
* gawk: version 5.3.1
|
||||
* gd: version 2.3.3
|
||||
* gdbm: version 1.24
|
||||
* gdk-pixbuf2: version 2.42.12
|
||||
* genpower: version 1.0.5
|
||||
* git: version 2.47.1
|
||||
* glew: version 2.2.0
|
||||
* glib2: version 2.82.4
|
||||
* glibc: version 2.40
|
||||
* glibc-zoneinfo: version 2024b
|
||||
* glu: version 9.0.3
|
||||
* gmp: version 6.3.0
|
||||
* gnutls: version 3.8.8
|
||||
* gptfdisk: version 1.0.10
|
||||
* graphite2: version 1.3.14
|
||||
* grep: version 3.11
|
||||
* gtk+3: version 3.24.43
|
||||
* gzip: version 1.13
|
||||
* harfbuzz: version 10.1.0
|
||||
* hdparm: version 9.65
|
||||
* hicolor-icon-theme: version 0.18
|
||||
* hostname: version 3.25
|
||||
* htop: version 3.3.0
|
||||
* hwloc: version 2.2.0
|
||||
* icu4c: version 76.1
|
||||
* imlib2: version 1.7.1
|
||||
* inetd: version 1.79s
|
||||
* infozip: version 6.0
|
||||
* inih: version 58
|
||||
* inotify-tools: version 4.23.9.0
|
||||
* intel-microcode: version 20241112
|
||||
* iperf3: version 3.17.1
|
||||
* iproute2: version 6.12.0
|
||||
* iptables: version 1.8.11
|
||||
* iputils: version 20240905
|
||||
* irqbalance: version 1.7.0
|
||||
* jansson: version 2.14
|
||||
* jemalloc: version 5.3.0
|
||||
* jq: version 1.6
|
||||
* json-c: version 0.18\_20240915
|
||||
* json-glib: version 1.10.6
|
||||
* kbd: version 2.7.1
|
||||
* kernel-firmware: version 20241220\_9e1d9ae
|
||||
* keyutils: version 1.6.3
|
||||
* kmod: version 33
|
||||
* krb5: version 1.21.3
|
||||
* lbzip2: version 2.5
|
||||
* less: version 668
|
||||
* libICE: version 1.1.2
|
||||
* libSM: version 1.2.5
|
||||
* libX11: version 1.8.10
|
||||
* libXau: version 1.0.12
|
||||
* libXaw: version 1.0.16
|
||||
* libXcomposite: version 0.4.6
|
||||
* libXcursor: version 1.2.3
|
||||
* libXdamage: version 1.1.6
|
||||
* libXdmcp: version 1.1.5
|
||||
* libXevie: version 1.0.3
|
||||
* libXext: version 1.3.6
|
||||
* libXfixes: version 6.0.1
|
||||
* libXfont2: version 2.0.7
|
||||
* libXfontcache: version 1.0.5
|
||||
* libXft: version 2.3.8
|
||||
* libXi: version 1.8.2
|
||||
* libXinerama: version 1.1.5
|
||||
* libXmu: version 1.2.1
|
||||
* libXpm: version 3.5.17
|
||||
* libXrandr: version 1.5.4
|
||||
* libXrender: version 0.9.12
|
||||
* libXres: version 1.2.2
|
||||
* libXt: version 1.3.1
|
||||
* libXtst: version 1.2.5
|
||||
* libXxf86dga: version 1.1.6
|
||||
* libXxf86misc: version 1.0.4
|
||||
* libXxf86vm: version 1.1.6
|
||||
* libaio: version 0.3.113
|
||||
* libarchive: version 3.7.7
|
||||
* libcap-ng: version 0.8.5
|
||||
* libcgroup: version 0.41
|
||||
* libdaemon: version 0.14
|
||||
* libdeflate: version 1.23
|
||||
* libdmx: version 1.1.5
|
||||
* libdrm: version 2.4.124
|
||||
* libedit: version 20240808\_3.1
|
||||
* libepoxy: version 1.5.10
|
||||
* libestr: version 0.1.9
|
||||
* libevdev: version 1.13.3
|
||||
* libevent: version 2.1.12
|
||||
* libfastjson: version 0.99.9
|
||||
* libffi: version 3.4.6
|
||||
* libfontenc: version 1.1.8
|
||||
* libgcrypt: version 1.11.0
|
||||
* libglvnd: version 1.7.0
|
||||
* libgpg-error: version 1.51
|
||||
* libgudev: version 238
|
||||
* libidn: version 1.42
|
||||
* libjpeg-turbo: version 3.1.0
|
||||
* liblogging: version 1.0.6
|
||||
* libmnl: version 1.0.5
|
||||
* libnetfilter\_conntrack: version 1.1.0
|
||||
* libnfnetlink: version 1.0.2
|
||||
* libnftnl: version 1.2.8
|
||||
* libnl3: version 3.11.0
|
||||
* libnvme: version 1.11.1
|
||||
* libpcap: version 1.10.5
|
||||
* libpciaccess: version 0.18.1
|
||||
* libpng: version 1.6.44
|
||||
* libpsl: version 0.21.5
|
||||
* libpthread-stubs: version 0.5
|
||||
* libseccomp: version 2.5.5
|
||||
* libssh: version 0.11.1
|
||||
* libssh2: version 1.11.1
|
||||
* libtasn1: version 4.19.0
|
||||
* libtiff: version 4.7.0
|
||||
* libtirpc: version 1.3.6
|
||||
* libtpms: version 0.9.0
|
||||
* libunistring: version 1.3
|
||||
* libunwind: version 1.8.1
|
||||
* libusb: version 1.0.27
|
||||
* libusb-compat: version 0.1.8
|
||||
* libuv: version 1.49.2
|
||||
* libvirt: version 10.7.0
|
||||
* libvirt-php: version 0.5.8
|
||||
* libwebp: version 1.5.0
|
||||
* libwebsockets: version 4.3.2
|
||||
* libx86: version 1.1
|
||||
* libxcb: version 1.17.0
|
||||
* libxcvt: version 0.1.3
|
||||
* libxkbcommon: version 1.7.0
|
||||
* libxkbfile: version 1.1.3
|
||||
* libxml2: version 2.13.5
|
||||
* libxshmfence: version 1.3.3
|
||||
* libxslt: version 1.1.42
|
||||
* libzip: version 1.11.2
|
||||
* listres: version 1.0.6
|
||||
* lm\_sensors: version 3.6.0
|
||||
* lmdb: version 0.9.33
|
||||
* logrotate: version 3.22.0
|
||||
* lshw: version B.02.19.2
|
||||
* lsof: version 4.99.4
|
||||
* lsscsi: version 0.32
|
||||
* lvm2: version 2.03.29
|
||||
* lz4: version 1.10.0
|
||||
* lzip: version 1.24.1
|
||||
* lzlib: version 1.14
|
||||
* lzo: version 2.10
|
||||
* mbuffer: version 20240107
|
||||
* mc: version 4.8.31
|
||||
* mcelog: version 202
|
||||
* mesa: version 24.2.8
|
||||
* miniupnpc: version 2.1
|
||||
* mkfontscale: version 1.2.3
|
||||
* mpfr: version 4.2.1
|
||||
* mtdev: version 1.1.7
|
||||
* nano: version 8.3
|
||||
* ncompress: version 5.0
|
||||
* ncurses: version 6.5
|
||||
* net-tools: version 20181103\_0eebece
|
||||
* nettle: version 3.10
|
||||
* network-scripts: version 15.1
|
||||
* nfs-utils: version 2.8.2
|
||||
* nghttp2: version 1.64.0
|
||||
* nghttp3: version 1.7.0
|
||||
* nginx: version 1.27.2
|
||||
* noto-fonts-ttf: version 2024.12.01
|
||||
* nss-mdns: version 0.14.1
|
||||
* ntfs-3g: version 2022.10.3
|
||||
* ntp: version 4.2.8p18
|
||||
* numactl: version 2.0.13
|
||||
* nvme-cli: version 2.11
|
||||
* oniguruma: version 6.9.9
|
||||
* openssh: version 9.9p1
|
||||
* openssl: version 3.4.0
|
||||
* ovmf: version stable202411
|
||||
* p11-kit: version 0.25.5
|
||||
* pam: version 1.6.1
|
||||
* pango: version 1.54.0
|
||||
* patch: version 2.7.6
|
||||
* pciutils: version 3.13.0
|
||||
* pcre: version 8.45
|
||||
* pcre2: version 10.44
|
||||
* perl: version 5.40.0
|
||||
* php: version 8.3.8
|
||||
* pixman: version 0.44.2
|
||||
* pkgtools: version 15.1
|
||||
* procps-ng: version 4.0.4
|
||||
* pv: version 1.6.6
|
||||
* qemu: version 9.1.0
|
||||
* qrencode: version 4.1.1
|
||||
* readline: version 8.2.013
|
||||
* reiserfsprogs: version 3.6.27
|
||||
* rpcbind: version 1.2.6
|
||||
* rsync: version 3.3.0
|
||||
* rsyslog: version 8.2102.0
|
||||
* sakura: version 3.5.0
|
||||
* samba: version 4.21.1
|
||||
* sdparm: version 1.12
|
||||
* sed: version 4.9
|
||||
* sessreg: version 1.1.3
|
||||
* setxkbmap: version 1.3.4
|
||||
* sg3\_utils: version 1.48
|
||||
* shadow: version 4.16.0
|
||||
* shared-mime-info: version 2.4
|
||||
* slim: version 1.3.6
|
||||
* smartmontools: version 7.4
|
||||
* spice: version 0.15.0
|
||||
* spirv-llvm-translator: version 19.1.2
|
||||
* sqlite: version 3.46.1
|
||||
* ssmtp: version 2.64
|
||||
* startup-notification: version 0.12
|
||||
* sudo: version 1.9.16p2
|
||||
* swtpm: version 0.7.3
|
||||
* sysfsutils: version 2.1.1
|
||||
* sysstat: version 12.7.6
|
||||
* sysvinit: version 3.12
|
||||
* sysvinit-scripts: version 15.1
|
||||
* talloc: version 2.4.2
|
||||
* tar: version 1.35
|
||||
* tcp\_wrappers: version 7.6
|
||||
* tdb: version 1.4.12
|
||||
* telnet: version 0.17
|
||||
* tevent: version 0.16.1
|
||||
* traceroute: version 2.1.6
|
||||
* transset: version 1.0.4
|
||||
* tree: version 2.1.1
|
||||
* usbredir: version 0.8.0
|
||||
* usbutils: version 018
|
||||
* userspace-rcu: version 0.15.0
|
||||
* utempter: version 1.2.1
|
||||
* util-linux: version 2.40.2
|
||||
* vbetool: version 1.2.2
|
||||
* virtiofsd: version 1.11.1
|
||||
* vsftpd: version 3.0.5
|
||||
* vte3: version 0.50.2
|
||||
* wayland: version 1.23.1
|
||||
* wget: version 1.25.0
|
||||
* which: version 2.21
|
||||
* wireguard-tools: version 1.0.20210914
|
||||
* wqy-zenhei-font-ttf: version 0.8.38\_1
|
||||
* wsdd2: version 1.8.7
|
||||
* xauth: version 1.1.3
|
||||
* xcb-util: version 0.4.1
|
||||
* xcb-util-keysyms: version 0.4.1
|
||||
* xclock: version 1.1.1
|
||||
* xdpyinfo: version 1.3.4
|
||||
* xdriinfo: version 1.0.7
|
||||
* xev: version 1.2.6
|
||||
* xf86-input-evdev: version 2.11.0
|
||||
* xf86-input-keyboard: version 1.9.0
|
||||
* xf86-input-mouse: version 1.9.3
|
||||
* xf86-input-synaptics: version 1.9.2
|
||||
* xf86-video-ast: version 1.1.5
|
||||
* xf86-video-mga: version 2.1.0
|
||||
* xf86-video-vesa: version 2.6.0
|
||||
* xfsprogs: version 6.12.0
|
||||
* xhost: version 1.0.9
|
||||
* xinit: version 1.4.2
|
||||
* xkbcomp: version 1.4.7
|
||||
* xkbevd: version 1.1.6
|
||||
* xkbutils: version 1.0.6
|
||||
* xkeyboard-config: version 2.43
|
||||
* xkill: version 1.0.6
|
||||
* xload: version 1.2.0
|
||||
* xlsatoms: version 1.1.4
|
||||
* xlsclients: version 1.1.5
|
||||
* xmessage: version 1.0.7
|
||||
* xmodmap: version 1.0.11
|
||||
* xorg-server: version 21.1.15
|
||||
* xprop: version 1.2.8
|
||||
* xrandr: version 1.5.3
|
||||
* xrdb: version 1.2.2
|
||||
* xrefresh: version 1.1.0
|
||||
* xset: version 1.2.5
|
||||
* xsetroot: version 1.1.3
|
||||
* xsm: version 1.0.6
|
||||
* xterm: version 396
|
||||
* xtrans: version 1.5.2
|
||||
* xwd: version 1.0.9
|
||||
* xwininfo: version 1.1.6
|
||||
* xwud: version 1.0.7
|
||||
* xxHash: version 0.8.3
|
||||
* xz: version 5.6.3
|
||||
* yajl: version 2.1.0
|
||||
* zfs: version 2.2.7\_6.6.68\_Unraid
|
||||
* zlib: version 1.3.1
|
||||
* zstd: version 1.5.6
|
||||
|
||||
Patches[](https://docs.unraid.net/unraid-os/release-notes/7.0.0#patches "Direct link to Patches")
|
||||
|
||||
---------------------------------------------------------------------------------------------------
|
||||
|
||||
With the [Unraid Patch plugin](https://forums.unraid.net/topic/185560-unraid-patch-plugin/)
|
||||
installed, visit _**Tools → Unraid Patch**_ to get the following patches / hot fixes:
|
||||
|
||||
* mover was not moving shares with spaces in the name from array to pool
|
||||
* File Manager: allow access to UD remote shares
|
||||
* Share Listing: tool tip showed `%20` instead of a space
|
||||
* VM Manager: fix issue with blank Discard field on vDisk
|
||||
* Include installed patches in diagnostics
|
||||
|
||||
Note: if you have the Mover Tuning plugin installed, you will be prompted to reboot in order to apply these patches.
|
||||
@@ -1,374 +0,0 @@
|
||||
[Skip to main content](https://docs.unraid.net/unraid-os/release-notes/7.1.0#__docusaurus_skipToContent_fallback)
|
||||
|
||||
On this page
|
||||
|
||||
This release adds wireless networking, the ability to import TrueNAS and other foreign pools, multiple enhancements to VMs, early steps toward making the webGUI responsive, and more.
|
||||
|
||||
Upgrading[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#upgrading "Direct link to Upgrading")
|
||||
|
||||
---------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Known issues[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#known-issues "Direct link to Known issues")
|
||||
|
||||
This release has a potential data-loss issue where the recent "mover empty disk" feature does not handle split levels on shares correctly. Resolved in 7.1.2.
|
||||
|
||||
#### Plugins[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#plugins "Direct link to Plugins")
|
||||
|
||||
Please upgrade all plugins, particularly Unraid Connect and the Nvidia driver.
|
||||
|
||||
For other known issues, see the [7.0.0 release notes](https://docs.unraid.net/unraid-os/release-notes/7.0.0/#known-issues)
|
||||
.
|
||||
|
||||
### Rolling back[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#rolling-back "Direct link to Rolling back")
|
||||
|
||||
We are making improvements to how we distribute patches between releases, so the standalone Patch Plugin will be uninstalled from this release. If rolling back to an earlier release we'd recommend reinstalling it. More details to come.
|
||||
|
||||
If rolling back earlier than 7.0.0, also see the [7.0.0 release notes](https://docs.unraid.net/unraid-os/release-notes/7.0.0/#rolling-back)
|
||||
.
|
||||
|
||||
Changes vs. [7.0.1](https://docs.unraid.net/unraid-os/release-notes/7.0.1/)
|
||||
[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#changes-vs-701 "Direct link to changes-vs-701")
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Storage[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#storage "Direct link to Storage")
|
||||
|
||||
* Import foreign ZFS pools such as TrueNAS, Proxmox, Ubuntu, QNAP.
|
||||
* Import the largest partition on disk instead of the first.
|
||||
* Removing device from btrfs raid1 or zfs single-vdev mirror will now reduce pool slot count.
|
||||
|
||||
#### Other storage changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-storage-changes "Direct link to Other storage changes")
|
||||
|
||||
* Fix: Disabled disks were not shown on the Dashboard.
|
||||
* Fix: Initially, only the first pool device spins down after adding a custom spin down setting.
|
||||
* Fix: Array Start was permitted if only 2 Parity devices and no Data devices.
|
||||
* Fix: The parity check notification often shows the previous parity check and not the current parity check.
|
||||
* Fix: Resolved certain instances of _Wrong pool State. Too many wrong or missing devices_ when upgrading.
|
||||
* Fix: Not possible to replace a zfs device from a smaller vdev.
|
||||
* mover:
|
||||
* Fix: Resolved issue with older share.cfg files that prevented mover from running.
|
||||
* Fix: mover would fail to recreate hard link if parent directory did not already exist.
|
||||
* Fix: mover would hang on named pipes.
|
||||
* Fix: [Using mover to empty an array disk](https://docs.unraid.net/unraid-os/release-notes/7.0.0/#using-mover-to-empty-an-array-disk)
|
||||
now only moves top level folders that have a corresponding share.cfg file, also fixed a bug that prevented the list of files _not moved_ from displaying.
|
||||
|
||||
### Networking[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#networking "Direct link to Networking")
|
||||
|
||||
#### Wireless Networking[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#wireless-networking "Direct link to Wireless Networking")
|
||||
|
||||
Unraid now supports WiFi! A hard wired connection is typically preferred, but if that isn't possible for your situation you can now set up WiFi.
|
||||
|
||||
For the initial setup you will either need a local keyboard/monitor (boot into GUI mode) or a wired connection. In the future, the USB Creator will be able to configure wireless networking prior to the initial boot.
|
||||
|
||||
* Access the webGUI and visit _**Settings → Network Settings → Wireless wlan0**_
|
||||
* First, enable WiFi
|
||||
* The **Regulatory Region** can generally be left to **Automatic**, but set it to your location if the network you want to connect to is not available
|
||||
* Find your preferred network and click the **Connect to WiFi network** icon
|
||||
* Fill in your WiFi password and other settings, then press **Join this network**
|
||||
* Note: if your goal is to use Docker containers over WiFi, unplug any wired connection before starting Docker
|
||||
|
||||
Additional details
|
||||
|
||||
* WPA2/WPA3 and WPA2/WPA3 Enterprise are supported, if both WPA2 and WPA3 are available then WPA3 is used.
|
||||
* Having both wired and wireless isn't recommended for long term use, it should be one or the other. But if both connections use DHCP and you (un)plug a network cable while wireless is configured, the system (excluding Docker) should adjust within 45-60 seconds.
|
||||
* Wireless chipset support: We expect to have success with modern WiFi adapters, but older adapters may not work. If your WiFi adapter isn't detected, please start a new forum thread and provide your diagnostics so it can be investigated.
|
||||
* If you want to use a USB WiFi adapter, see this list of [USB WiFi adapters that are supported with Linux in-kernel drivers](https://github.com/morrownr/USB-WiFi/blob/main/home/USB_WiFi_Adapters_that_are_supported_with_Linux_in-kernel_drivers.md)
|
||||
|
||||
* Advanced: New firmware files placed in `/boot/config/firmware/` will be copied to `/lib/firmware/` before driver modules are loaded (existing files will not be overwritten).
|
||||
|
||||
Limitations: there are networking limitations when using wireless, as a wlan can only have a single mac address.
|
||||
|
||||
* Only one wireless NIC is supported, wlan0
|
||||
* wlan0 is not able to participate in a bond
|
||||
* Docker containers
|
||||
* On _**Settings → Docker**_, note that when wireless is enabled, the system will ignore the **Docker custom network type** setting and always use **ipvlan** (macvlan is not possible because wireless does not support multiple mac addresses on a single interface)
|
||||
* _**Settings → Docker**_, **Host access to custom networks** must be disabled
|
||||
* A Docker container's **Network Type** cannot use br0/bond0/eth0
|
||||
* Docker has a limitation that it cannot participate in two networks that share the same subnet. If switching between wired and wireless, you will need to restart Docker and reconfigure all existing containers to use the new interface. We recommend setting up either wired or wireless and not switching.
|
||||
* VMs
|
||||
* We recommend setting your VM **Network Source** to **virbr0**, there are no limits to how many VMs you can run in this mode. The VMs will have full network access, the downside is they will not be accessible from the network. You can still access them via VNC to the host.
|
||||
* With some manual configuration, a single VM can be made accessible on the network:
|
||||
* Configure the VM with a static IP address
|
||||
* Configure the same IP address on the ipvtap interface, type: `ip addr add IP-ADDRESS dev shim-wlan0`
|
||||
|
||||
#### Other networking changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-networking-changes "Direct link to Other networking changes")
|
||||
|
||||
* On _**Settings → Network Settings**_, you can now adjust the server's DNS settings without stopping other services first. See the top of the **eth0** section.
|
||||
* When configuring a network interface, each interface has an **Info** button showing details for the current connection.
|
||||
* When configuring a network interface, the **Desired MTU** field is disabled until you click **Enable jumbo frames**. Hover over the icon for a warning about changing the MTU, in most cases it should be left at the default setting.
|
||||
* When configuring multiple network interfaces, by default the additional interfaces will have their gateway disabled, this is a safe default that works on most networks where a single gateway is required. If an additional gateway is enabled, it will be given a higher metric than existing gateways so there are no conflicts. You can override as needed.
|
||||
* Old network interfaces are automatically removed from config files when you save changes to _**Settings → Network Settings**_.
|
||||
* Fix various issues with DHCP.
|
||||
|
||||
### VM Manager[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#vm-manager "Direct link to VM Manager")
|
||||
|
||||
#### Nouveau GPU driver[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#nouveau-gpu-driver "Direct link to Nouveau GPU driver")
|
||||
|
||||
The Nouveau driver for Nvidia GPUs is now included, disabled by default as we expect most users to want the Nvidia driver instead. To enable it, uninstall the Nvidia driver plugin and run `touch /boot/config/modprobe.d/nouveau.conf` then reboot.
|
||||
|
||||
#### VirGL[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#virgl "Direct link to VirGL")
|
||||
|
||||
You can now share Intel and AMD GPUs between multiple Linux VMs at the same time using VirGL, the virtual 3D OpenGL renderer. When used this way, the GPU will provide accelerated graphics but will not output on the monitor. Note that this does not yet work with Windows VMs or the standard Nvidia plugin (it does work with Nvidia GPUs using the Nouveau driver though).
|
||||
|
||||
To use the virtual GPU in a Linux VM, edit the VM template and set the **Graphics Card** to **Virtual**. Then set the **VM Console Video Driver** to **Virtio(3d)** and select the appropriate **Render GPU** from the list of available GPUs (note that GPUs bound to VFIO-PCI or passed through to other VMs cannot be chosen here, and Nvidia GPUs are available only if the Nouveau driver is enabled).
|
||||
|
||||
#### QXL Virtual GPUs[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#qxl-virtual-gpus "Direct link to QXL Virtual GPUs")
|
||||
|
||||
To use this feature in a VM, edit the VM template and set the **Graphics Card** to **Virtual** and the **VM Console Video Driver** to **QXL (Best)**, you can then choose how many screens it supports and how much memory to allocate to it.
|
||||
|
||||
#### CPU Pinning is optional[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#cpu-pinning-is-optional "Direct link to CPU Pinning is optional")
|
||||
|
||||
CPU pinning is now optional, if no cores are pinned to a VM then the OS chooses which cores to use.
|
||||
|
||||
From _**Settings → CPU Settings**_ or when editing a VM, press **Deselect All** to unpin all cores for this VM and set the number of vCPUs to 1, increase as needed.
|
||||
|
||||
### User VM Templates[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#user-vm-templates "Direct link to User VM Templates")
|
||||
|
||||
To create a user template:
|
||||
|
||||
* Edit the VM, choose **Create Modify Template** and give it a name. It will now be stored as a **User Template**, available on the **Add VM** screen.
|
||||
|
||||
To use a user template:
|
||||
|
||||
* From the VM listing, press **Add VM**, then choose the template from the **User Templates** area.
|
||||
|
||||
Import/Export
|
||||
|
||||
* From the **Add VM** screen, hover over a user template and click the arrow to export the template to a location on the server or download it.
|
||||
* On another Unraid system press **Import from file** or **Upload** to use the template.
|
||||
|
||||
#### Other VM changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-vm-changes "Direct link to Other VM changes")
|
||||
|
||||
* When the **Primary** GPU is assigned as passthrough for a VM, warn that it may not work without loading a compatible vBIOS.
|
||||
* Fix: Remove confusing _Path does not exist_ message when setting up the VM service
|
||||
* Feat: Unraid VMs can now boot into GUI mode, when using the QXL video driver
|
||||
* Fix: Could not change VM icon when using XML view
|
||||
|
||||
### WebGUI[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#webgui "Direct link to WebGUI")
|
||||
|
||||
#### CSS changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#css-changes "Direct link to CSS changes")
|
||||
|
||||
As a step toward making the webGUI responsive, we have reworked the CSS. For the most part, this should not be noticeable aside from some minor color adjustments. We expect that most plugins will be fine as well, although plugin authors may want to review [this documentation](https://github.com/unraid/webgui/blob/master/emhttp/plugins/dynamix/styles/themes/README.md)
|
||||
. Responsiveness will continue to be improved in future releases.
|
||||
|
||||
If you notice alignment issues or color problems in any official theme, please let us know.
|
||||
|
||||
#### nchan out of shared memory issues[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#nchan-out-of-shared-memory-issues "Direct link to nchan out of shared memory issues")
|
||||
|
||||
We have made several changes that should prevent this issue, and if we detect that it happens, we restart nginx in an attempt to automatically recover from it.
|
||||
|
||||
If your Main page never populates, or if you see "nchan: Out of shared memory" in your logs, please start a new forum thread and provide your diagnostics. You can optionally navigate to _**Settings → Display Settings**_ and disable **Allow realtime updates on inactive browsers**; this prevents your browser from requesting certain updates once it loses focus. When in this state you will see a banner saying **Live Updates Paused**, simply click on the webGUI to bring it to the foreground and re-enable live updates. Certain pages will automatically reload to ensure they are displaying the latest information.
|
||||
|
||||
#### Other WebGUI changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-webgui-changes "Direct link to Other WebGUI changes")
|
||||
|
||||
* Fix: AdBlockers could prevent Dashboard from loading
|
||||
* Fix: Under certain circumstances, browser memory utilization on the Dashboard could exponentially grow
|
||||
* Fix: Prevent corrupted config file from breaking the Dashboard
|
||||
|
||||
Misc[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#misc "Direct link to Misc")
|
||||
|
||||
------------------------------------------------------------------------------------------
|
||||
|
||||
### Other changes[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-changes "Direct link to Other changes")
|
||||
|
||||
* On _**Settings → Date and Time**_ you can now sync your clock with a **PTP** server (we expect most users will continue to use **NTP**)
|
||||
* Upgraded to jQuery 3.7.1 and jQuery UI 1.14.1
|
||||
* Fix: Visiting boot.php will no longer shut down the server
|
||||
* Fix: On the Docker tab, the dropdown menu for the last container was truncated in certain situations
|
||||
* Fix: On _**Settings → Docker**_, deleting a **Docker directory** stored on a ZFS volume now works properly
|
||||
* Fix: On boot, custom ssh configuration is copied from `/boot/config/ssh/` to `/etc/ssh/` again
|
||||
* Fix: File Manager can copy files from a User Share to an Unassigned Disk mount
|
||||
* Fix: Remove confusing _Path does not exist_ message when setting up the Docker service
|
||||
* Fix: update `rc.messagebus` to correct handling of `/etc/machine-id`
|
||||
* Diagnostics
|
||||
* Fix: Improved anonymization of IPv6 addresses in diagnostics
|
||||
* Fix: Improved anonymization of user names in certain config files in diagnostics
|
||||
* Fix: diagnostics could fail due to multibyte strings in syslog
|
||||
* Feat: diagnostics now logs errors in logs/diagnostics.error.log
|
||||
|
||||
### Linux kernel[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#linux-kernel "Direct link to Linux kernel")
|
||||
|
||||
* version 6.12.24-Unraid
|
||||
|
||||
* Apply: \[PATCH\] [Revert "PCI: Avoid reset when disabled via sysfs"](https://lore.kernel.org/lkml/20250414211828.3530741-1-alex.williamson@redhat.com/)
|
||||
|
||||
* CONFIG\_NR\_CPUS: increased from 256 to 512
|
||||
* CONFIG\_TEHUTI\_TN40: Tehuti Networks TN40xx 10G Ethernet adapters
|
||||
* CONFIG\_DRM\_XE: Intel Xe Graphics
|
||||
* CONFIG\_UDMABUF: userspace dmabuf misc driver
|
||||
* CONFIG\_DRM\_NOUVEAU: Nouveau (NVIDIA) cards
|
||||
* CONFIG\_DRM\_QXL: QXL virtual GPU
|
||||
* CONFIG\_EXFAT\_FS: exFAT filesystem support
|
||||
* CONFIG\_PSI: Pressure stall information tracking
|
||||
* CONFIG\_PSI\_DEFAULT\_DISABLED: Require boot parameter to enable pressure stall information tracking, i.e., `psi=1`
|
||||
* CONFIG\_ENCLOSURE\_SERVICES: Enclosure Services
|
||||
* CONFIG\_SCSI\_ENCLOSURE: SCSI Enclosure Support
|
||||
* CONFIG\_DRM\_ACCEL: Compute Acceleration Framework
|
||||
* CONFIG\_DRM\_ACCEL\_HABANALABS: HabanaLabs AI accelerators
|
||||
* CONFIG\_DRM\_ACCEL\_IVPU: Intel NPU (Neural Processing Unit)
|
||||
* CONFIG\_DRM\_ACCEL\_QAIC: Qualcomm Cloud AI accelerators
|
||||
* zfs: version 2.3.1
|
||||
* Wireless support
|
||||
|
||||
* Atheros/Qualcomm
|
||||
* Broadcom
|
||||
* Intel
|
||||
* Marvell
|
||||
* MediaTek
|
||||
* Realtek
|
||||
|
||||
### Base distro updates[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#base-distro-updates "Direct link to Base distro updates")
|
||||
|
||||
* aaa\_glibc-solibs: version 2.41
|
||||
* adwaita-icon-theme: version 48.0
|
||||
* at-spi2-core: version 2.56.1
|
||||
* bind: version 9.20.8
|
||||
* btrfs-progs: version 6.14
|
||||
* ca-certificates: version 20250425
|
||||
* cairo: version 1.18.4
|
||||
* cifs-utils: version 7.3
|
||||
* coreutils: version 9.7
|
||||
* dbus: version 1.16.2
|
||||
* dbus-glib: version 0.114
|
||||
* dhcpcd: version 9.5.2
|
||||
* diffutils: version 3.12
|
||||
* dnsmasq: version 2.91
|
||||
* docker: version 27.5.1
|
||||
* e2fsprogs: version 1.47.2
|
||||
* elogind: version 255.17
|
||||
* elfutils: version 0.193
|
||||
* ethtool: version 6.14
|
||||
* firefox: version 128.10 (AppImage)
|
||||
* floppy: version 5.6
|
||||
* fontconfig: version 2.16.2
|
||||
* gdbm: version 1.25
|
||||
* git: version 2.49.0
|
||||
* glib2: version 2.84.1
|
||||
* glibc: version 2.41
|
||||
* glibc-zoneinfo: version 2025b
|
||||
* grep: version 3.12
|
||||
* gtk+3: version 3.24.49
|
||||
* gzip: version 1.14
|
||||
* harfbuzz: version 11.1.0
|
||||
* htop: version 3.4.1
|
||||
* icu4c: version 77.1
|
||||
* inih: version 60
|
||||
* intel-microcode: version 20250211
|
||||
* iperf3: version 3.18
|
||||
* iproute2: version 6.14.0
|
||||
* iw: version 6.9
|
||||
* jansson: version 2.14.1
|
||||
* kernel-firmware: version 20250425\_cf6ea3d
|
||||
* kmod: version 34.2
|
||||
* less: version 674
|
||||
* libSM: version 1.2.6
|
||||
* libX11: version 1.8.12
|
||||
* libarchive: version 3.7.8
|
||||
* libcgroup: version 3.2.0
|
||||
* libedit: version 20250104\_3.1
|
||||
* libevdev: version 1.13.4
|
||||
* libffi: version 3.4.8
|
||||
* libidn: version 1.43
|
||||
* libnftnl: version 1.2.9
|
||||
* libnvme: version 1.13
|
||||
* libgpg-error: version 1.55
|
||||
* libpng: version 1.6.47
|
||||
* libseccomp: version 2.6.0
|
||||
* liburing: version 2.9
|
||||
* libusb: version 1.0.28
|
||||
* libuv: version 1.51.0
|
||||
* libvirt: version 11.2.0
|
||||
* libXft: version 2.3.9
|
||||
* libxkbcommon: version 1.9.0
|
||||
* libxml2: version 2.13.8
|
||||
* libxslt: version 1.1.43
|
||||
* libzip: version 1.11.3
|
||||
* linuxptp: version 4.4
|
||||
* lvm2: version 2.03.31
|
||||
* lzip: version 1.25
|
||||
* lzlib: version 1.15
|
||||
* mcelog: version 204
|
||||
* mesa: version 25.0.4
|
||||
* mpfr: version 4.2.2
|
||||
* nano: version 8.4
|
||||
* ncurses: version 6.5\_20250419
|
||||
* nettle: version 3.10.1
|
||||
* nghttp2: version 1.65.0
|
||||
* nghttp3: version 1.9.0
|
||||
* noto-fonts-ttf: version 2025.03.01
|
||||
* nvme-cli: version 2.13
|
||||
* oniguruma: version 6.9.10
|
||||
* openssh: version 10.0p1
|
||||
* openssl: version 3.5.0
|
||||
* ovmf: version stable202502
|
||||
* pam: version 1.7.0
|
||||
* pango: version 1.56.3
|
||||
* parted: version 3.6
|
||||
* patch: version 2.8
|
||||
* pcre2: version 10.45
|
||||
* perl: version 5.40.2
|
||||
* php: version 8.3.19
|
||||
* procps-ng: version 4.0.5
|
||||
* qemu: version 9.2.3
|
||||
* rsync: version 3.4.1
|
||||
* samba: version 4.21.3
|
||||
* shadow: version 4.17.4
|
||||
* spice: version 0.15.2
|
||||
* spirv-llvm-translator: version 20.1.0
|
||||
* sqlite: version 3.49.1
|
||||
* sysstat: version 12.7.7
|
||||
* sysvinit: version 3.14
|
||||
* talloc: version 2.4.3
|
||||
* tdb: version 1.4.13
|
||||
* tevent: version 0.16.2
|
||||
* tree: version 2.2.1
|
||||
* userspace-rcu: version 0.15.2
|
||||
* utempter: version 1.2.3
|
||||
* util-linux: version 2.41
|
||||
* virglrenderer: version 1.1.1
|
||||
* virtiofsd: version 1.13.1
|
||||
* which: version 2.23
|
||||
* wireless-regdb: version 2025.02.20
|
||||
* wpa\_supplicant: version 2.11
|
||||
* xauth: version 1.1.4
|
||||
* xf86-input-synaptics: version 1.10.0
|
||||
* xfsprogs: version 6.14.0
|
||||
* xhost: version 1.0.10
|
||||
* xinit: version 1.4.4
|
||||
* xkeyboard-config: version 2.44
|
||||
* xorg-server: version 21.1.16
|
||||
* xterm: version 398
|
||||
* xtrans: version 1.6.0
|
||||
* xz: version 5.8.1
|
||||
* zstd: version 1.5.7
|
||||
|
||||
Patches[](https://docs.unraid.net/unraid-os/release-notes/7.1.0#patches "Direct link to Patches")
|
||||
|
||||
---------------------------------------------------------------------------------------------------
|
||||
|
||||
No patches are currently available for this release.
|
||||
|
||||
* [Upgrading](https://docs.unraid.net/unraid-os/release-notes/7.1.0#upgrading)
|
||||
* [Known issues](https://docs.unraid.net/unraid-os/release-notes/7.1.0#known-issues)
|
||||
|
||||
* [Rolling back](https://docs.unraid.net/unraid-os/release-notes/7.1.0#rolling-back)
|
||||
|
||||
* [Changes vs. 7.0.1](https://docs.unraid.net/unraid-os/release-notes/7.1.0#changes-vs-701)
|
||||
* [Storage](https://docs.unraid.net/unraid-os/release-notes/7.1.0#storage)
|
||||
|
||||
* [Networking](https://docs.unraid.net/unraid-os/release-notes/7.1.0#networking)
|
||||
|
||||
* [VM Manager](https://docs.unraid.net/unraid-os/release-notes/7.1.0#vm-manager)
|
||||
|
||||
* [User VM Templates](https://docs.unraid.net/unraid-os/release-notes/7.1.0#user-vm-templates)
|
||||
|
||||
* [WebGUI](https://docs.unraid.net/unraid-os/release-notes/7.1.0#webgui)
|
||||
|
||||
* [Misc](https://docs.unraid.net/unraid-os/release-notes/7.1.0#misc)
|
||||
* [Other changes](https://docs.unraid.net/unraid-os/release-notes/7.1.0#other-changes)
|
||||
|
||||
* [Linux kernel](https://docs.unraid.net/unraid-os/release-notes/7.1.0#linux-kernel)
|
||||
|
||||
* [Base distro updates](https://docs.unraid.net/unraid-os/release-notes/7.1.0#base-distro-updates)
|
||||
|
||||
* [Patches](https://docs.unraid.net/unraid-os/release-notes/7.1.0#patches)
|
||||
@@ -1,348 +0,0 @@
|
||||
[Skip to main content](https://docs.unraid.net/unraid-os/release-notes/7.2.0#__docusaurus_skipToContent_fallback)
|
||||
|
||||
On this page
|
||||
|
||||
The Unraid webGUI is now responsive! The interface automatically adapts to different screen sizes, making it usable on mobile devices, tablets, and desktop monitors alike. The Unraid API is now built in, and the release also brings RAIDZ expansion, Ext2/3/4, NTFS, and exFAT support, and the (optional) ability to login to the webGUI via SSO, among other features and bug fixes.
|
||||
|
||||
Note that some plugins may have visual issues in this release; please give plugin authors time to make adjustments. Plugin authors, please see this post describing [how to update your plugins to make them responsive](https://forums.unraid.net/topic/192172-responsive-webgui-plugin-migration-guide/)
|
||||
.
|
||||
|
||||
Upgrading[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#upgrading "Direct link to Upgrading")
|
||||
|
||||
---------------------------------------------------------------------------------------------------------
|
||||
|
||||
For step-by-step instructions, see [Upgrading Unraid](https://docs.unraid.net/unraid-os/system-administration/maintain-and-update/upgrading-unraid/)
|
||||
. Questions about your [license](https://docs.unraid.net/unraid-os/troubleshooting/licensing-faq/#license-types--features)
|
||||
?
|
||||
|
||||
### Known issues[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#known-issues "Direct link to Known issues")
|
||||
|
||||
#### Plugins[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#plugins "Direct link to Plugins")
|
||||
|
||||
The Theme Engine, Dark Theme, Dynamix Date Time, and Flash Remount plugins are incompatible and will be automatically uninstalled, as will outdated versions of Unraid Connect.
|
||||
|
||||
Please upgrade all plugins, particularly Unraid Connect and the Nvidia driver, before updating. Note that some plugins may have visual issues in this release; please give plugin authors time to make adjustments.
|
||||
|
||||
For other known issues, see the [7.1.4 release notes](https://docs.unraid.net/unraid-os/release-notes/7.1.4/#known-issues)
|
||||
.
|
||||
|
||||
### Rolling back[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#rolling-back "Direct link to Rolling back")
|
||||
|
||||
If rolling back earlier than 7.1.4, also see the [7.1.4 release notes](https://docs.unraid.net/unraid-os/release-notes/7.1.4/#rolling-back)
|
||||
.
|
||||
|
||||
Changes vs. [7.1.4](https://docs.unraid.net/unraid-os/release-notes/7.1.4/)
|
||||
[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#changes-vs-714 "Direct link to changes-vs-714")
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
### Storage[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#storage "Direct link to Storage")
|
||||
|
||||
#### ZFS RAIDZ expansion[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#zfs-raidz-expansion "Direct link to ZFS RAIDZ expansion")
|
||||
|
||||
You can now expand your single-vdev RAIDZ1/2/3 pools, one drive at a time. For detailed instructions, see [RAIDZ expansion](https://docs.unraid.net/unraid-os/release-notes/7.2.0/warn/)
|
||||
.
|
||||
|
||||
* With the array running, on **_Main → Pool Devices_**, select the pool name to view the details
|
||||
* In the **Pool Status** area, check for an **Upgrade Pool** button. If one exists, you'll need to click that before continuing. Note that upgrading the pool will limit your ability to downgrade to earlier releases of Unraid (7.1 should be OK, but not 7.0)
|
||||
* Stop the array
|
||||
* On **_Main → Pool Devices_**, add a slot to the pool
|
||||
* Select the appropriate drive (must be at least as large as the smallest drive in the pool)
|
||||
* Start the array
|
||||
|
||||
#### Enhancements[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#enhancements "Direct link to Enhancements")
|
||||
|
||||
* Fix: There will now be an "invalid expansion" warning if the pool needs to be upgraded first
|
||||
* Improvement: Better defaults for ZFS RAIDZ vdevs
|
||||
|
||||
#### Ext2/3/4, NTFS, and exFAT Support[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#ext234-ntfs-and-exfat-support "Direct link to Ext2/3/4, NTFS, and exFAT Support")
|
||||
|
||||
Unraid now supports Ext2/3/4, NTFS, and exFAT drive formats in addition to XFS, BTRFS, and ZFS.
|
||||
|
||||
Use case: say you are a content creator with a box full of hard drives containing all of your historical videos. When first creating an array (or after running **_Tools → New Config_**), add all of your existing data drives (blank, or with data in a supported drive format) to the array. Any parity drives will be overwritten but the data drives will retain their data. You can enjoy parity protection, share them on the network, and take full advantage of everything Unraid has to offer.
|
||||
|
||||
Critical note: you can continue adding filled data drives to the array up until you start the array with a parity drive installed. Once a parity drive has been added, any new data drives will be zeroed out when they are added to the array.
|
||||
|
||||
To clarify, Unraid has always worked this way; what is new is that Unraid now supports additional drive formats.
|
||||
|
||||
Additionally, you can create single drive pools using the new formats as well.
|
||||
|
||||
* Improved the usability of the **File System Type** dropdown as the list of available options is growing
|
||||
|
||||
#### Warn about deprecated file systems[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#warn-about-deprecated-file-systems "Direct link to Warn about deprecated file systems")
|
||||
|
||||
The **_Main_** page will now warn if any array or pool drives are formatted with ReiserFS; these drives need to be migrated to another filesystem ASAP as they will not be usable in a future release of Unraid (likely Unraid 7.3). Similarly, it will warn if there are drives formatted in a deprecated version of XFS; those need to be migrated before 2030. See [Converting to a new file system type](https://docs.unraid.net/unraid-os/using-unraid-to/manage-storage/file-systems/#converting-to-a-new-file-system-type)
|
||||
in the docs for details.
|
||||
|
||||
#### Other storage changes[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#other-storage-changes "Direct link to Other storage changes")
|
||||
|
||||
* Improvement: Two-device ZFS pools are mirrored by default, but you can make them RAIDZ1 if you plan to expand that vdev in the future
|
||||
* Improvement: Add **File system status** to **DeviceInfo** page, showing whether a drive is mounted/unmounted and empty/not empty
|
||||
* Fix: Display issue on Main page when two pools are named similarly
|
||||
* Fix: [glibc bug](https://github.com/openzfs/zfs/issues/17629)
|
||||
which could lead to data loss with ZFS
|
||||
* Fix: BTRFS array disks with multiple filesystem signatures don't mount
|
||||
* Fix: Resolved some issues for parity disks with existing 1MiB aligned partitions
|
||||
* Fix: When stopping array, do not attempt 'umount' on array devices that are not mounted
|
||||
* Improvement: Exclusive shares may be selected for NFS export
|
||||
* Improvement: Disallow shares named `homes`, `global`, and `printers` (these have special meaning in Samba)
|
||||
* Fix: Correct handling of case-insensitive share names
|
||||
* Fix: Shares with invalid characters in names could not be deleted or modified
|
||||
* Fix: Improvements to reading from/writing to SMB Security Settings
|
||||
* Improvement: A top-level `lost+found` directory will not be shared
|
||||
* Fix: In smb.conf, set `smb3 directory leases = no` to avoid issues with the current release of Samba
|
||||
* Fix: Restore comments in default `/etc/modprobe.d/*.conf` files
|
||||
* Fix: Windows fails to create a new folder for a share with primary=ZFS pool and secondary=EXT4 array disk
|
||||
* Fix: New devices added to an existing array with valid parity should be repartitioned
|
||||
* Fix: Do not spin down devices for which SMART self-test is in progress
|
||||
* Fix: New array device not available for shares until the array is restarted
|
||||
* Fix: ZFS allocation profile always shows one vdev only
|
||||
|
||||
### Networking[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#networking "Direct link to Networking")
|
||||
|
||||
#### Other networking changes[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#other-networking-changes "Direct link to Other networking changes")
|
||||
|
||||
* Feature: IPv6 Docker custom networks now support Unique Local Addresses (ULA) in addition to the more standard Global Unicast Addresses (GUA), assuming your router provides both subnets when the Unraid host gets an IPv6 address via DHCP or SLAAC. To use, assign a custom static IP from the appropriate subnet to the container.
|
||||
* Fix: The **_Settings → Network Settings → Interface Rules_** page sometimes showed the wrong network driver (was just a display issue)
|
||||
|
||||
### VM Manager[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#vm-manager "Direct link to VM Manager")
|
||||
|
||||
* Feature: Save PCI hardware data, warn if hardware used by VM changes
|
||||
* Feature: Support virtual sound cards in VMs
|
||||
|
||||
#### Other VM changes[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#other-vm-changes "Direct link to Other VM changes")
|
||||
|
||||
* Improvement: Enhance multi-monitor support, automatically enabling spicevmc when needed
|
||||
* Feature: Upgrade to noVNC v1.6
|
||||
* Removed historical OpenElec and LibreElec VM templates
|
||||
* Fix: VM Console did not work when user shares were disabled
|
||||
* Fix: Don't allow single quotes in Domains storage path
|
||||
* Fix: Change Windows 11 VM defaults
|
||||
* Fix: Unable to view vdisk locations in languages other than English
|
||||
* Fix: No capacity warning when editing a VM to add a 2nd vdisk
|
||||
* Fix: Cdrom Bus: select IDE for i440 and SATA for q35
|
||||
|
||||
### Unraid API[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#unraid-api "Direct link to Unraid API")
|
||||
|
||||
The Unraid API is now built into Unraid! The new Notifications panel is the first major feature to use it; over time, the entire webGUI will be updated to use it.
|
||||
|
||||
The Unraid API is fully open source: [https://github.com/unraid/api](https://github.com/unraid/api)
|
||||
. Get started in the [API docs](https://docs.unraid.net/API/)
|
||||
.
|
||||
|
||||
The Unraid Connect plugin adds functionality which communicates with our cloud servers; it remains completely optional.
|
||||
|
||||
#### Other Unraid API changes[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#other-unraid-api-changes "Direct link to Other Unraid API changes")
|
||||
|
||||
* dynamix.unraid.net 4.25.3 - [see changes](https://github.com/unraid/api/releases)
|
||||
|
||||
|
||||
### WebGUI[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#webgui "Direct link to WebGUI")
|
||||
|
||||
#### Responsive CSS[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#responsive-css "Direct link to Responsive CSS")
|
||||
|
||||
The Unraid webGUI is now responsive! Most screens should now work as well on your phone as they do on your desktop monitor.
|
||||
|
||||
#### Login to the webGUI via SSO[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#login-to-the-webgui-via-sso "Direct link to Login to the webGUI via SSO")
|
||||
|
||||
Login to the Unraid webGUI using Single Sign-On (SSO) with your Unraid.net account or any other OIDC-compliant provider. For details on this _optional_ feature, see [OIDC Provider Setup](https://docs.unraid.net/API/oidc-provider-setup/)
|
||||
in the Docs.
|
||||
|
||||
#### Other WebGUI changes[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#other-webgui-changes "Direct link to Other WebGUI changes")
|
||||
|
||||
* Feature: Add new notifications management view, access via the bell in the upper right corner of the webGUI
|
||||
* Feature: Add progress indicator to Docker / Plugin / VM popup window
|
||||
* Feature: Show countdown timer on login page when locked out due to too many incorrect login attempts
|
||||
* Feature: Add _Force Install_ button to bypass version checks when manually installing plugins
|
||||
* Feature: Add **_Tools → Open Terminal_** page; can access it by searching for "terminal". Can optionally remove Terminal button from toolbar via **_Settings → Display Settings → Show Terminal Button in header_**
|
||||
* Feature: **_Users → Root → SSH authorized keys_** now supports more formats (thanks [wandercone](https://github.com/wandercone)
|
||||
)
|
||||
* Feature: Added a welcome screen for new systems, shown after setting the root password
|
||||
* Fix: Re-enable smart test buttons after completion of test
|
||||
* Fix: Prevent webGUI from crashing when dynamix.cfg is corrupt, log any issues
|
||||
* Fix: `blob:` links shouldn't be considered external
|
||||
* Feature: Differentiate between Intel E-Cores and P-Cores on the Dashboard
|
||||
* Feature: Dashboard now gets CPU usage stats from the Unraid API
|
||||
* Fix: Dashboard: More than 1TB of RAM was not reported correctly
|
||||
* Chore: Change charting libraries on the Dashboard
|
||||
* Fix: Prevent Firefox from showing resend/cancel popup when starting array (thanks [dkaser](https://github.com/dkaser)
|
||||
)
|
||||
* Fix: File Manager: stop spinner and show error when it fails (thanks [poroyo](https://github.com/poroyo)
|
||||
)
|
||||
* Feature: Speed up rendering of Plugins and Docker pages
|
||||
* Fix: Prevent issues when clicking an external link from within a changelog
|
||||
* Improvement: Show RAM and network speed in human-readable units
|
||||
* Fix: On _**Settings → Display Settings → Font size**_, remove extreme options that break the webGUI
|
||||
|
||||
Misc[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#misc "Direct link to Misc")
|
||||
|
||||
------------------------------------------------------------------------------------------
|
||||
|
||||
* Feature: Do not execute `go` script when in safe mode, create `/boot/config/go.safemode` script if needed
|
||||
* Improvement: Require authentication on `http://localhost`. This improves security and allows Tailscale Funnel to work with the webGUI. Note that when booting in GUI mode, you will now need to login again to access the webGUI.
|
||||
* Feature: Add favicon and web app manifest support
|
||||
* Feature: License key upgrades are installed automatically, without needing to restart the array
|
||||
* Feature: Thunderbolt devices will be auto-authorized when connected
|
||||
* Feature: Improvements to custom udev rules and scripts, at boot:
|
||||
* `/boot/config/udev/*.rules` are copied to `/etc/udev/rules.d/`
|
||||
* `/boot/config/udev/*.sh` are copied to `/etc/udev/scripts/` where they can be used by your custom udev rules
|
||||
* Fix: Remove support for nonworking ipv6.hash.myunraid.net URLs
|
||||
* Fix: Docker custom network creation failed when IPv6 was enabled
|
||||
* Fix: Resolve issues with high CPU load due to nchan and lsof
|
||||
* Improvement: Removed option to disable live updates on inactive browsers; should no longer be needed
|
||||
* Improvement: Better messaging around mover and "dangling links"
|
||||
* Fix: Prevent errors related to _searchLink_ when installing plugins
|
||||
* Fix: PHP warnings importing WireGuard tunnels
|
||||
* Improvement: _Europe/Kiev_ timezone renamed to _Europe/Kyiv_ to align with the IANA Time Zone Database
|
||||
* Improvement: Enhance Discord notification agent; enable/disable the agent to get the updates (thanks [mgutt](https://github.com/mgutt)
|
||||
)
|
||||
* Fix: Further anonymization of diagnostics.zip
|
||||
* Improvement: Protect WebGUI from fatal PHP errors
|
||||
* Improvement: Adjust logging during plugin installs
|
||||
* Fix: CPU Pinning for Docker containers could crash in certain instances
|
||||
* Fix: Docker NAT failure due to missing br\_netfilter
|
||||
* Fix: Scheduled mover runs not logged
|
||||
|
||||
### Linux kernel[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#linux-kernel "Direct link to Linux kernel")
|
||||
|
||||
* version 6.12.54-Unraid
|
||||
* built-in: CONFIG\_EFIVAR\_FS: EFI Variable filesystem
|
||||
* CONFIG\_INTEL\_RAPL: Intel RAPL support via MSR interface
|
||||
* CONFIG\_NLS\_DEFAULT: change from "iso8859-1" to "utf8"
|
||||
* Added eMMC support:
|
||||
* CONFIG\_MMC: MMC/SD/SDIO card support
|
||||
* CONFIG\_MMC\_BLOCK: MMC block device driver
|
||||
* CONFIG\_MMC\_SDHCI: Secure Digital Host Controller Interface support
|
||||
* CONFIG\_MMC\_SDHCI\_PCI: SDHCI support on PCI bus
|
||||
* CONFIG\_MMC\_SDHCI\_ACPI: SDHCI support for ACPI enumerated SDHCI controllers
|
||||
* CONFIG\_MMC\_SDHCI\_PLTFM: SDHCI platform and OF driver helper
|
||||
|
||||
### Base distro updates[](https://docs.unraid.net/unraid-os/release-notes/7.2.0#base-distro-updates "Direct link to Base distro updates")
|
||||
|
||||
* aaa\_glibc-solibs: version 2.42
|
||||
* adwaita-icon-theme: version 48.1
|
||||
* at-spi2-core: version 2.58.1
|
||||
* bash: version 5.3.003
|
||||
* bind: version 9.20.13
|
||||
* btrfs-progs: version 6.17
|
||||
* ca-certificates: version 20251003
|
||||
* cifs-utils: version 7.4
|
||||
* coreutils: version 9.8
|
||||
* cryptsetup: version 2.8.1
|
||||
* curl: version 8.16.0
|
||||
* e2fsprogs: version 1.47.3
|
||||
* ethtool: version 6.15
|
||||
* exfatprogs: version 1.3.0
|
||||
* fontconfig: version 2.17.1
|
||||
* freetype: version 2.14.0
|
||||
* gdbm: version 1.26
|
||||
* gdk-pixbuf2: version 2.44.3
|
||||
* git: version 2.51.1
|
||||
* glib2: version 2.86.0
|
||||
* glibc: version 2.42 (build 2)
|
||||
* gnutls: version 3.8.10
|
||||
* grub: version 2.12
|
||||
* gtk+3: version 3.24.51
|
||||
* harfbuzz: version 12.1.0
|
||||
* intel-microcode: version 20250812
|
||||
* iproute2: version 6.17.0
|
||||
* inih: version 61
|
||||
* inotify-tools: version 4.25.9.0
|
||||
* iputils: version 20250605
|
||||
* iw: version 6.17
|
||||
* json-glib: version 1.10.8
|
||||
* kbd: version 2.9.0
|
||||
* kernel-firmware: version 20251018\_8b4de42
|
||||
* krb5: version 1.22.1
|
||||
* less: version 685
|
||||
* libXfixes: version 6.0.2
|
||||
* libXpresent: version 1.0.2
|
||||
* libXres: version 1.2.3
|
||||
* libarchive: version 3.8.2
|
||||
* libdrm: version 2.4.127
|
||||
* libedit: version 20251016\_3.1
|
||||
* libevdev: version 1.13.5
|
||||
* libffi: version 3.5.2
|
||||
* libgpg-error: version 1.56
|
||||
* libjpeg-turbo: version 3.1.2
|
||||
* libnftnl: version 1.3.0
|
||||
* libnvme: version 1.15
|
||||
* libpng: version 1.6.50
|
||||
* libssh: version 0.11.3
|
||||
* libtiff: version 4.7.1
|
||||
* libtirpc: version 1.3.7
|
||||
* libunwind: version 1.8.3
|
||||
* liburing: version 2.12
|
||||
* libusb: version 1.0.29
|
||||
* libwebp: version 1.6.0
|
||||
* libvirt: version 11.7.0
|
||||
* libxkbcommon: version 1.11.0
|
||||
* libxml2: version 2.14.6
|
||||
* libzip: version 1.11.4
|
||||
* lsof: version 4.99.5
|
||||
* lvm2: version 2.03.35
|
||||
* mcelog: version 207
|
||||
* mesa: version 25.2.5
|
||||
* nano: version 8.6
|
||||
* ncurses: version 6.5\_20250816
|
||||
* nettle: version 3.10.2
|
||||
* nghttp2: version 1.67.1
|
||||
* nghttp3: version 1.12.0
|
||||
* noto-fonts-ttf: version 2025.10.01
|
||||
* nvme-cli: version 2.15
|
||||
* openssh: version 10.2p1
|
||||
* openssl: version 3.5.4
|
||||
* ovmf: version unraid202502
|
||||
* p11-kit: version 0.25.10
|
||||
* pam: version 1.7.1
|
||||
* pcre2: version 10.46
|
||||
* pango: version 1.56.4
|
||||
* pciutils: version 3.14.0
|
||||
* perl: version 5.42.0
|
||||
* php: version 8.3.26-x86\_64-1\_LT with gettext extension
|
||||
* pixman: version 0.46.4
|
||||
* rclone: version 1.70.1-x86\_64-1\_SBo\_LT.tgz
|
||||
* readline: version 8.3.001
|
||||
* samba: version 4.23.2
|
||||
* shadow: version 4.18.0
|
||||
* smartmontools: version 7.5
|
||||
* spirv-llvm-translator: version 21.1.1
|
||||
* sqlite: version 3.50.4
|
||||
* sudo: version 1.9.17p2
|
||||
* sysstat: version 12.7.8
|
||||
* sysvinit: version 3.15
|
||||
* tdb: version 1.4.14
|
||||
* tevent: version 0.17.1
|
||||
* userspace-rcu: version 0.15.3
|
||||
* util-linux: version 2.41.2
|
||||
* wayland: version 1.24.0
|
||||
* wireguard-tools: version 1.0.20250521
|
||||
* wireless-regdb: version 2025.10.07
|
||||
* xdpyinfo: version 1.4.0
|
||||
* xdriinfo: version 1.0.8
|
||||
* xfsprogs: version 6.16.0
|
||||
* xkeyboard-config: version 2.46
|
||||
* xorg-server: version 21.1.18
|
||||
* xterm: version 402
|
||||
* zfs: version zfs-2.3.4\_6.12.54\_Unraid-x86\_64-2\_LT
|
||||
|
||||
* [Upgrading](https://docs.unraid.net/unraid-os/release-notes/7.2.0#upgrading)
|
||||
* [Known issues](https://docs.unraid.net/unraid-os/release-notes/7.2.0#known-issues)
|
||||
|
||||
* [Rolling back](https://docs.unraid.net/unraid-os/release-notes/7.2.0#rolling-back)
|
||||
|
||||
* [Changes vs. 7.1.4](https://docs.unraid.net/unraid-os/release-notes/7.2.0#changes-vs-714)
|
||||
* [Storage](https://docs.unraid.net/unraid-os/release-notes/7.2.0#storage)
|
||||
|
||||
* [Networking](https://docs.unraid.net/unraid-os/release-notes/7.2.0#networking)
|
||||
|
||||
* [VM Manager](https://docs.unraid.net/unraid-os/release-notes/7.2.0#vm-manager)
|
||||
|
||||
* [Unraid API](https://docs.unraid.net/unraid-os/release-notes/7.2.0#unraid-api)
|
||||
|
||||
* [WebGUI](https://docs.unraid.net/unraid-os/release-notes/7.2.0#webgui)
|
||||
|
||||
* [Misc](https://docs.unraid.net/unraid-os/release-notes/7.2.0#misc)
|
||||
* [Linux kernel](https://docs.unraid.net/unraid-os/release-notes/7.2.0#linux-kernel)
|
||||
|
||||
* [Base distro updates](https://docs.unraid.net/unraid-os/release-notes/7.2.0#base-distro-updates)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,569 +0,0 @@
|
||||
# ExaAI Research Findings: Unraid API Ecosystem
|
||||
|
||||
**Date:** 2026-02-07
|
||||
**Research Topic:** Unraid API Ecosystem - Architecture, Authentication, GraphQL Schema, Integrations, and MCP Server
|
||||
**Specialist:** ExaAI Semantic Search
|
||||
|
||||
## Methodology
|
||||
|
||||
- **Total queries executed:** 22
|
||||
- **Total unique URLs discovered:** 55+
|
||||
- **Sources deep-read:** 14
|
||||
- **Search strategy:** Multi-perspective semantic search covering official docs, source code analysis, community integrations, DeepWiki architecture analysis, feature roadmap, and third-party client libraries
|
||||
|
||||
---
|
||||
|
||||
## Key Findings
|
||||
|
||||
### 1. Unraid API Overview and Availability
|
||||
|
||||
The Unraid API provides a **GraphQL interface** for programmatic interaction with Unraid servers. Starting with **Unraid 7.2** (released 2025-10-29), the API comes **built into the operating system** with no plugin installation required ([source](https://docs.unraid.net/API/)).
|
||||
|
||||
Key capabilities include:
|
||||
- Automation, monitoring, and integration through a modern, strongly-typed API
|
||||
- Multiple authentication methods (API keys, session cookies, SSO/OIDC)
|
||||
- Comprehensive system coverage
|
||||
- Built-in developer tools including a GraphQL Sandbox
|
||||
|
||||
For **pre-7.2 versions**, the API is available via the Unraid Connect plugin from Community Applications. Users do **not** need to sign in to Unraid Connect to use the API locally ([source](https://docs.unraid.net/API/)).
|
||||
|
||||
The API was announced alongside Unraid 7.2.0, which also brought RAIDZ expansion, responsive WebGUI, and SSO login capabilities ([source](https://docs.unraid.net/unraid-os/release-notes/7.2.0/)).
|
||||
|
||||
### 2. Architecture and Technology Stack
|
||||
|
||||
The Unraid API is organized as a **pnpm workspace monorepo** containing 8+ packages ([source](https://deepwiki.com/unraid/api), [source](https://github.com/unraid/api)):
|
||||
|
||||
**Core Packages:**
|
||||
| Package | Location | Purpose |
|
||||
|---------|----------|---------|
|
||||
| `@unraid/api` | `api/` | NestJS-based GraphQL server, service layer, OS integration |
|
||||
| `@unraid/web` | `web/` | Vue 3 web application, Apollo Client integration |
|
||||
| `@unraid/ui` | `unraid-ui/` | Reusable Vue components, web component builds |
|
||||
| `@unraid/shared` | `packages/unraid-shared/` | Shared TypeScript types, utilities, constants |
|
||||
| `unraid-api-plugin-connect` | `packages/unraid-api-plugin-connect/` | Remote access, UPnP, dynamic DNS |
|
||||
|
||||
**Backend Technology Stack:**
|
||||
- **NestJS 11.1.6** with **Fastify 5.5.0** HTTP server
|
||||
- **Apollo Server 4.12.2** for GraphQL
|
||||
- **GraphQL 16.11.0** reference implementation
|
||||
- **graphql-ws 6.0.6** for WebSocket subscriptions
|
||||
- **TypeScript 5.9.2** (77.4% of codebase)
|
||||
- **Redux Toolkit** for state management
|
||||
- **Casbin 5.38.0** for RBAC authorization
|
||||
- **PM2 6.0.8** for process management
|
||||
- **dockerode 4.0.7** for Docker container management
|
||||
- **@unraid/libvirt 2.1.0** for VM lifecycle control
|
||||
- **systeminformation 5.27.8** for hardware metrics
|
||||
- **chokidar 4.0.3** for file watching
|
||||
|
||||
**Frontend Technology Stack:**
|
||||
- **Vue 3.5.20** with Composition API
|
||||
- **Apollo Client 3.14.0** with WebSocket subscriptions
|
||||
- **Pinia 3.0.3** for state management
|
||||
- **TailwindCSS 4.1.12** for styling
|
||||
- **Vite 7.1.3** as build tool
|
||||
|
||||
**Current Version:** 4.29.2 (core packages) ([source](https://deepwiki.com/unraid/api))
|
||||
|
||||
### 3. GraphQL API Layer
|
||||
|
||||
The API uses a **code-first approach** where the GraphQL schema is generated automatically from TypeScript decorators ([source](https://deepwiki.com/unraid/api/2.1-graphql-api-layer)):
|
||||
|
||||
- `@ObjectType()` - Defines GraphQL object types
|
||||
- `@InputType()` - Specifies input types for mutations
|
||||
- `@Resolver()` - Declares resolver classes
|
||||
- `@Query()`, `@Mutation()`, `@Subscription()` - Operation decorators
|
||||
|
||||
**Schema Generation Pipeline:**
|
||||
```
|
||||
TypeScript Classes with Decorators
|
||||
-> @nestjs/graphql processes decorators
|
||||
-> Schema generated at runtime
|
||||
-> @graphql-codegen extracts schema
|
||||
-> TypedDocumentNode generated for frontend
|
||||
-> Type-safe operations in Vue 3 client
|
||||
```
|
||||
|
||||
**Key Configuration:**
|
||||
- **autoSchemaFile**: Code-first generation enabled
|
||||
- **introspection**: Always enabled (controlled by security guards)
|
||||
- **subscriptions**: WebSocket via `graphql-ws` protocol
|
||||
- **fieldResolverEnhancers**: Guards enabled for field-level authorization
|
||||
- **transformSchema**: Applies permission checks and conditional field removal
|
||||
|
||||
The GraphQL Sandbox is accessible at `http://YOUR_SERVER_IP/graphql` when enabled through Settings -> Management Access -> Developer Options, or via CLI: `unraid-api developer --sandbox true` ([source](https://docs.unraid.net/API/how-to-use-the-api/)).
|
||||
|
||||
**Live API documentation** is available through Apollo GraphQL Studio for exploring the complete schema ([source](https://docs.unraid.net/API/how-to-use-the-api/)).
|
||||
|
||||
### 4. Authentication and Authorization
|
||||
|
||||
The API implements a **multi-layered security architecture** separating authentication from authorization ([source](https://deepwiki.com/unraid/api/2.2-authentication-and-authorization)):
|
||||
|
||||
#### Authentication Methods
|
||||
|
||||
1. **API Keys** - Programmatic access via `x-api-key` HTTP header
|
||||
- Created via WebGUI (Settings -> Management Access -> API Keys) or CLI
|
||||
- Validated using `passport-http-header-strategy`
|
||||
- JWT verification via `jose 6.0.13`
|
||||
|
||||
2. **Session Cookies** - Automatic when signed into WebGUI
|
||||
|
||||
3. **SSO/OIDC** - External identity providers via `openid-client 6.6.4`
|
||||
- Supported providers: Unraid.net, Google, Microsoft/Azure AD, Keycloak, Authelia, Authentik, Okta
|
||||
- Configuration via Settings -> Management Access -> API -> OIDC
|
||||
- Two authorization modes: Simple (email domain/address) and Advanced (claim-based rules)
|
||||
([source](https://docs.unraid.net/API/oidc-provider-setup/))
|
||||
|
||||
#### API Key Authorization Flow for Third-Party Apps
|
||||
|
||||
Applications can request API access via a self-service flow ([source](https://docs.unraid.net/API/api-key-app-developer-authorization-flow/)):
|
||||
|
||||
```
|
||||
https://[unraid-server]/ApiKeyAuthorize?name=MyApp&scopes=docker:read,vm:*&redirect_uri=https://myapp.com/callback&state=abc123
|
||||
```
|
||||
|
||||
**Scope Format:** `resource:action` pattern
|
||||
- Resources: docker, vm, system, share, user, network, disk
|
||||
- Actions: create, read, update, delete, * (full access)
|
||||
|
||||
#### Programmatic API Key Management
|
||||
|
||||
CLI-based CRUD operations for automation ([source](https://docs.unraid.net/API/programmatic-api-key-management/)):
|
||||
|
||||
```bash
|
||||
# Create with granular permissions
|
||||
unraid-api apikey --create \
|
||||
--name "monitoring key" \
|
||||
--permissions "DOCKER:READ_ANY,ARRAY:READ_ANY" \
|
||||
--description "Read-only access" --json
|
||||
|
||||
# Delete
|
||||
unraid-api apikey --delete --name "monitoring key"
|
||||
```
|
||||
|
||||
**Available Roles:** ADMIN, CONNECT, VIEWER, GUEST
|
||||
|
||||
**Available Resources:** ACTIVATION_CODE, API_KEY, ARRAY, CLOUD, CONFIG, CONNECT, DOCKER, FLASH, INFO, LOGS, NETWORK, NOTIFICATIONS, OS, SERVICES, SHARE, VMS
|
||||
|
||||
**Available Actions:** CREATE_ANY, CREATE_OWN, READ_ANY, READ_OWN, UPDATE_ANY, UPDATE_OWN, DELETE_ANY, DELETE_OWN
|
||||
|
||||
#### RBAC Implementation
|
||||
|
||||
- **Casbin 5.38.0** with **nest-authz 2.17.0** for policy-based access control
|
||||
- **accesscontrol 2.2.1** maintains the permission matrix
|
||||
- **@UsePermissions() directive** provides field-level authorization by removing protected fields from the GraphQL schema dynamically
|
||||
- **Rate limiting:** 100 requests per 10 seconds via `@nestjs/throttler 6.4.0`
|
||||
- **Security headers:** `@fastify/helmet 13.0.1` with minimal CSP
|
||||
|
||||
### 5. CLI Reference
|
||||
|
||||
All commands follow the pattern: `unraid-api <command> [options]` ([source](https://docs.unraid.net/API/cli)):
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `unraid-api start [--log-level <level>]` | Start API service |
|
||||
| `unraid-api stop [--delete]` | Stop API service |
|
||||
| `unraid-api restart` | Restart API service |
|
||||
| `unraid-api logs [-l <lines>]` | View logs (default 100 lines) |
|
||||
| `unraid-api config` | Display configuration |
|
||||
| `unraid-api switch-env [-e <env>]` | Toggle production/staging |
|
||||
| `unraid-api developer [--sandbox true/false]` | Developer mode |
|
||||
| `unraid-api apikey [options]` | API key management |
|
||||
| `unraid-api sso add-user/remove-user/list-users` | SSO user management |
|
||||
| `unraid-api sso validate-token <token>` | Token validation |
|
||||
| `unraid-api report [-r] [-j]` | Generate system report |
|
||||
|
||||
Log levels: trace, debug, info, warn, error, fatal
|
||||
|
||||
### 6. Docker Container Management
|
||||
|
||||
The Docker Management Service provides comprehensive container lifecycle management through GraphQL ([source](https://deepwiki.com/unraid/api/2.4.2-notification-system)):
|
||||
|
||||
**Container Lifecycle Mutations:**
|
||||
- `start(id)` - Start a stopped container
|
||||
- `stop(id)` - Stop with 10-second timeout
|
||||
- `pause(id)` / `unpause(id)` - Suspend/resume
|
||||
- `removeContainer(id, options)` - Remove container and optionally images
|
||||
- `updateContainer(id)` - Upgrade to latest image version
|
||||
- `updateAllContainers()` - Batch update all containers
|
||||
|
||||
**Container Data Enrichment:**
|
||||
- Canonical name extraction via `autostartService`
|
||||
- Auto-start configuration details
|
||||
- Port deduplication (IPv4/IPv6)
|
||||
- LAN-accessible URL computation
|
||||
- State normalization: RUNNING, PAUSED, EXITED
|
||||
|
||||
**Update Detection:**
|
||||
- Compares local image digests against remote registry manifests
|
||||
- Returns `UpdateStatus`: UP_TO_DATE, UPDATE_AVAILABLE, REBUILD_READY, UNKNOWN
|
||||
- Legacy PHP script integration for status computation
|
||||
|
||||
**Real-Time Event Monitoring:**
|
||||
- Watches `/var/run` for Docker socket via chokidar
|
||||
- Filters: start, stop, die, kill, pause, unpause, restart, oom events
|
||||
- Publishes to `PUBSUB_CHANNEL.INFO` for subscription updates
|
||||
|
||||
**Container Organizer:**
|
||||
- Folder-based hierarchical organization
|
||||
- Operations: createFolder, setFolderChildren, deleteEntries, moveEntriesToFolder, renameFolder
|
||||
- Behind `ENABLE_NEXT_DOCKER_RELEASE` feature flag
|
||||
|
||||
**Statistics Streaming:**
|
||||
- Real-time resource metrics via subscriptions
|
||||
- CPU percent, memory usage/percent, network I/O, block I/O
|
||||
- Auto-start/stop streams based on subscription count
|
||||
|
||||
### 7. VM Management
|
||||
|
||||
VM management uses the `@unraid/libvirt` package (v2.1.0) for QEMU/KVM integration ([source](https://github.com/unraid/libvirt), [source](https://deepwiki.com/unraid/api)):
|
||||
|
||||
- Domain state management (start, stop, pause, resume)
|
||||
- Snapshot creation and restoration
|
||||
- Domain XML inspection
|
||||
- Retry logic (up to 2 minutes) for libvirt daemon initialization
|
||||
|
||||
Unraid 7.x enhancements include VM clones, snapshots, user-created VM templates, inline XML editing, and advanced GPU passthrough ([source](https://docs.unraid.net/unraid-os/manual/vm/vm-management/)).
|
||||
|
||||
### 8. Storage and Array Management
|
||||
|
||||
**Array Operations** (available via Python client library):
|
||||
- `start_array()` / `stop_array()`
|
||||
- `start_parity_check(correct)` / `pause_parity_check()` / `resume_parity_check()` / `cancel_parity_check()`
|
||||
- `spin_up_disk(id)` / `spin_down_disk(id)`
|
||||
|
||||
**GraphQL Queries for Storage:**
|
||||
|
||||
```graphql
# Disk Information
{ disks { device name type size vendor temperature smartStatus } }

# Share Information
{ shares { name comment free size used } }

# Array Status (from official docs example)
{ array { state capacity { free used total } disks { name size status temp } } }
```
|
||||
|
||||
([source](https://deepwiki.com/domalab/unraid-api-client/4.3-network-and-storage-queries), [source](https://docs.unraid.net/API/how-to-use-the-api/))
|
||||
|
||||
**ZFS Support:** Unraid supports ZFS pools with automatic data integrity, built-in RAID (mirrors, RAIDZ), snapshots, and send/receive ([source](https://docs.unraid.net/unraid-os/advanced-configurations/optimize-storage/zfs-storage/)).
|
||||
|
||||
### 9. Network Management
|
||||
|
||||
**Network Query Fields:**
|
||||
|
||||
| Field | Type | Description |
|-------|------|-------------|
| iface | String | Interface identifier |
| ifaceName | String | Interface name |
| ipv4/ipv6 | String | IP addresses |
| mac | String | MAC address |
| operstate | String | Operational state (up/down) |
| type | String | Interface type |
| duplex | String | Duplex mode |
| speed | Number | Interface speed |
| accessUrls | Array | Access URLs for the interface |
|
||||
|
||||
```graphql
{ network { iface ifaceName ipv4 ipv6 mac operstate type duplex speed accessUrls { type name ipv4 ipv6 } } }
```
|
||||
|
||||
([source](https://deepwiki.com/domalab/unraid-api-client/4.3-network-and-storage-queries))
|
||||
|
||||
### 10. Notification System
|
||||
|
||||
The Unraid API exposes a notification system with the following features ([source](https://deepwiki.com/unraid/api)):
|
||||
|
||||
- File-based notifications stored in `/unread/` and `/archive/` directories
|
||||
- GraphQL queries for notification overview (counts by type)
|
||||
- Notification listing with filters
|
||||
- Notification agents: email, Discord, Slack (built-in); custom agents via scripts
|
||||
|
||||
Community solutions for additional notification targets include ntfy.sh, Matrix, and webhook-based approaches ([source](https://forums.unraid.net/topic/88464-webhook-notification-method/), [source](https://lder.dev/posts/ntfy-Notifications-With-unRAID/)).
|
||||
|
||||
### 11. WebSocket Subscriptions (Real-Time)
|
||||
|
||||
The API implements real-time subscriptions via the `graphql-ws` protocol (v6.0.6) ([source](https://deepwiki.com/unraid/api/2.1-graphql-api-layer)):
|
||||
|
||||
- **PubSub Engine:** `graphql-subscriptions@3.0.0` for event publishing
|
||||
- **Transport:** WebSocket via `graphql-ws` protocol
|
||||
- **Trigger:** Redux store updates from file watchers propagate to subscribed clients
|
||||
- **Available subscriptions include:**
|
||||
- Container state changes
|
||||
- Container statistics (CPU, memory, I/O)
|
||||
- System metrics updates
|
||||
- Array status changes
|
||||
|
||||
The subscription system is event-driven: file changes on disk (detected by chokidar) -> Redux store update -> PubSub event -> WebSocket push to clients.
|
||||
|
||||
### 12. MCP Server Integrations
|
||||
|
||||
**jmagar/unraid-mcp** (this project) is the primary MCP server for Unraid ([source](https://glama.ai/mcp/servers/@jmagar/unraid-mcp), [source](https://mcpmarket.com/server/unraid)):
|
||||
|
||||
- Python-based MCP server using FastMCP framework
|
||||
- 10 tools with 90 actions for comprehensive Unraid management
|
||||
- Read-only access by default for safety
|
||||
- Listed on Glama, MCP Market, MCPServers.com, LangDB, UBOS, JuheAPI
|
||||
- 21 GitHub stars
|
||||
- Communicates via stdio transport
|
||||
|
||||
**Alternative MCP implementations:**
|
||||
- `lwsinclair/unraid-mcp` - Another MCP implementation ([source](https://github.com/lwsinclair/unraid-mcp))
|
||||
- `ruaan-deysel/unraid-management-agent` - Go-based plugin with REST API + WebSocket + MCP integration ([source](https://github.com/ruaan-deysel/unraid-management-agent))
|
||||
|
||||
### 13. Third-Party Client Libraries
|
||||
|
||||
#### Python Client: `unraid-api` (PyPI)
|
||||
|
||||
**Author:** DomaLab (Ruaan Deysel)
|
||||
**Version:** 1.3.1 (as of Jan 2026)
|
||||
**Requirements:** Python 3.11+, Unraid 7.1.4+, API v4.21.0+
|
||||
|
||||
Features ([source](https://github.com/domalab/unraid-api-client), [source](https://unraid-api.domalab.net/)):
|
||||
- Async/await with aiohttp
|
||||
- Home Assistant ready (accepts external ClientSession)
|
||||
- Pydantic models for all responses
|
||||
- SSL auto-discovery
|
||||
- Redirect handling for myunraid.net
|
||||
|
||||
**Supported Operations:**
|
||||
- Docker: start/stop/restart containers
|
||||
- VMs: start/stop/force_stop/pause/resume
|
||||
- Array: start/stop, parity check (start/pause/resume/cancel), disk spin up/down
|
||||
- System: metrics, shares, UPS, services, plugins, log files, notifications
|
||||
- Custom GraphQL queries
|
||||
|
||||
#### Home Assistant Integration
|
||||
|
||||
`chris-mc1/unraid_api` (60 stars) - Full Home Assistant integration using the local GraphQL API ([source](https://github.com/chris-mc1/unraid_api)):
|
||||
- Monitors array state, disk status, temperatures
|
||||
- Docker container status
|
||||
- Network information
|
||||
- HACS compatible
|
||||
|
||||
#### Homey Smart Home
|
||||
|
||||
Unraid API integration available for the Homey smart home platform ([source](https://homey.app/no-no/app/community.unraid.api/Unraid-API/)).
|
||||
|
||||
#### Legacy APIs (Pre-GraphQL)
|
||||
|
||||
- `ElectricBrainUK/UnraidAPI` (127 stars) - Original Node.js API using web scraping ([source](https://github.com/ElectricBrainUK/UnraidAPI))
|
||||
- `BoKKeR/UnraidAPI-RE` (68 stars) - Reverse-engineered Node.js API ([source](https://github.com/BoKKeR/UnraidAPI-RE))
|
||||
- `ridenui/unraid` - TypeScript client via SSH ([source](https://github.com/ridenui/unraid))
|
||||
|
||||
### 14. Unraid Connect and Remote Access
|
||||
|
||||
Unraid Connect provides cloud-enabled server management ([source](https://docs.unraid.net/connect/), [source](https://unraid.net/connect)):
|
||||
|
||||
- **Dynamic Remote Access:** Toggle on/off server accessibility via UPnP
|
||||
- **Server Management:** Manage multiple servers from Connect web UI
|
||||
- **Deep Linking:** Links to relevant WebGUI sections
|
||||
- **Online Flash Backup:** Cloud-based configuration backups
|
||||
- **Real-time Monitoring:** Server health and resource usage monitoring
|
||||
- **Notifications:** Server health, storage status, critical events
|
||||
|
||||
The Connect plugin (`unraid-api-plugin-connect`) handles remote access, UPnP, dynamic DNS, and Mothership API communication ([source](https://deepwiki.com/unraid/api)).
|
||||
|
||||
### 15. Plugin Architecture
|
||||
|
||||
The API supports a plugin system for extending functionality ([source](https://deepwiki.com/unraid/api)):
|
||||
|
||||
- Plugins are NPM packages implementing the `UnraidPlugin` interface
|
||||
- Access to NestJS dependency injection
|
||||
- Can extend the GraphQL schema
|
||||
- Dynamic loading via `PluginLoaderService` at runtime
|
||||
- `@unraid/create-api-plugin` CLI scaffolding tool available
|
||||
- Plugin documentation at `api/docs/developer/api-plugins.md`
|
||||
|
||||
### 16. Feature Bounty Program
|
||||
|
||||
Unraid launched a **Feature Bounty Program** in September 2025 ([source](https://unraid.net/blog/api-feature-bounty-program)):
|
||||
|
||||
- Community developers implement specific API features for monetary rewards
|
||||
- Bounty board: `github.com/orgs/unraid/projects/3/views/1`
|
||||
- Accelerates feature development beyond core team capacity
|
||||
|
||||
**Notable Open Bounty: System Temperature Monitoring** ([source](https://github.com/unraid/api/issues/1597)):
|
||||
- Current API provides only disk temperatures via smartctl
|
||||
- Proposed comprehensive monitoring: CPU, motherboard, GPU, NVMe, chipset
|
||||
- Proposed GraphQL schema with TemperatureSensor, TemperatureSummary types
|
||||
- Would use lm-sensors, smartctl, nvidia-smi, IPMI
|
||||
|
||||
### 17. Monitoring and Grafana Integration
|
||||
|
||||
While the Unraid API does not natively expose Prometheus metrics, the community has established monitoring patterns ([source](https://unraid.net/blog/prometheus)):
|
||||
|
||||
- **Prometheus Node Exporter** plugin for Unraid
|
||||
- **Grafana dashboards** available:
|
||||
- Unraid System Dashboard V2 (ID: 7233) ([source](https://grafana.com/grafana/dashboards/7233-unraid-system-dashboard-v2/))
|
||||
- Unraid UPS Monitoring (ID: 19243) ([source](https://grafana.com/grafana/dashboards/19243-unraid-ups-monitoring/))
|
||||
- **cAdvisor** for container-level metrics
|
||||
|
||||
### 18. Development and Contribution
|
||||
|
||||
**Development Environment Requirements:**
|
||||
- Node.js 22.x (enforced)
|
||||
- pnpm 10.15.0
|
||||
- Bash, Docker, libvirt, jq
|
||||
|
||||
**Key Development Commands:**
|
||||
```bash
pnpm dev        # All dev servers in parallel
pnpm build      # Production builds
pnpm codegen    # Generate GraphQL types
pnpm test       # Run test suites (Vitest)
pnpm lint       # ESLint
pnpm type-check # TypeScript checking
```
|
||||
|
||||
**Deployment to Unraid:**
|
||||
```bash
pnpm unraid:deploy <SERVER_IP>
```
|
||||
|
||||
**CI/CD Pipeline:**
|
||||
1. PR previews with unique build URLs
|
||||
2. Staging deployment for merged PRs
|
||||
3. Production releases via release-please with semantic versioning
|
||||
|
||||
([source](https://github.com/unraid/api/blob/main/CLAUDE.md))
|
||||
|
||||
---
|
||||
|
||||
## Expert Opinions and Analysis
|
||||
|
||||
The DeepWiki auto-generated documentation characterizes the Unraid API as "a modern GraphQL API and web interface for managing Unraid servers" that "replaces portions of the legacy PHP-based WebGUI with a type-safe, real-time API built on NestJS and Vue 3, while maintaining backward compatibility through hybrid integration" ([source](https://deepwiki.com/unraid/api)).
|
||||
|
||||
The Feature Bounty Program blog post indicates Unraid is actively investing in the API ecosystem: "The new Unraid API has already come a long way as a powerful, open-source toolkit that unlocks endless possibilities for automation, integrations, and third-party applications" ([source](https://unraid.net/blog/api-feature-bounty-program)).
|
||||
|
||||
---
|
||||
|
||||
## Contradictions and Debates
|
||||
|
||||
1. **Code-first vs Schema-first:** The CLAUDE.md mentions "GraphQL schema-first approach with code generation" while the DeepWiki analysis describes a "code-first approach with NestJS decorators that generate the GraphQL schema." The DeepWiki analysis appears more accurate based on the `autoSchemaFile` configuration and NestJS decorator usage.
|
||||
|
||||
2. **File Manager API:** No dedicated file browser/upload/download API was found in the GraphQL schema. File operations appear to be handled through the legacy PHP WebGUI rather than the new API.
|
||||
|
||||
3. **RClone via API:** While our MCP server project has RClone tools, these appear to interface with rclone config files rather than a native GraphQL API for cloud storage management.
|
||||
|
||||
---
|
||||
|
||||
## Data Points and Statistics
|
||||
|
||||
| Metric | Value | Source |
|--------|-------|--------|
| Unraid API native since | v7.2.0 (2025-10-29) | [docs.unraid.net](https://docs.unraid.net/unraid-os/release-notes/7.2.0/) |
| GitHub stars (official repo) | 86 | [github.com/unraid/api](https://github.com/unraid/api) |
| Total releases | 102 | [github.com/unraid/api](https://github.com/unraid/api) |
| Codebase language | TypeScript 77.4%, Vue 11.8%, PHP 5.6% | [github.com/unraid/api](https://github.com/unraid/api) |
| Current package version | 4.29.2 | [deepwiki.com](https://deepwiki.com/unraid/api) |
| Rate limit | 100 req/10 sec | [deepwiki.com](https://deepwiki.com/unraid/api/2.2-authentication-and-authorization) |
| Python client PyPI version | 1.3.1 | [pypi.org](https://pypi.org/project/unraid-api/1.3.1/) |
| Home Assistant integration stars | 60 | [github.com](https://github.com/chris-mc1/unraid_api) |
| jmagar/unraid-mcp stars | 21 | [github.com](https://github.com/jmagar/unraid-mcp) |
|
||||
|
||||
---
|
||||
|
||||
## Gaps Identified
|
||||
|
||||
1. **Full GraphQL Schema Dump:** No publicly accessible introspection dump or SDL file was found. The live schema is only available via the GraphQL Sandbox on a running Unraid server.
|
||||
|
||||
2. **File Manager API:** No evidence of file browse/upload/download GraphQL mutations. This appears to remain in the PHP WebGUI layer.
|
||||
|
||||
3. **Temperature Monitoring:** Currently limited to disk temperatures via smartctl. Comprehensive temperature monitoring is an open feature bounty (not yet implemented).
|
||||
|
||||
4. **Parity/Array Operation Mutations:** While the Python client library implements `start_array()`/`stop_array()`, the specific GraphQL mutations and their schemas were not found in public documentation.
|
||||
|
||||
5. **RClone GraphQL API:** The extent of rclone integration through the GraphQL API versus legacy integration is unclear.
|
||||
|
||||
6. **Flash Backup API:** Flash backups appear to be handled through Unraid Connect (cloud-based) rather than a local GraphQL API.
|
||||
|
||||
7. **Network Configuration Mutations:** While network queries return interface data, mutations for VLAN/bonding configuration were not found in the API documentation.
|
||||
|
||||
8. **WebSocket Subscription Schema:** The specific subscription types and their exact GraphQL definitions are not publicly documented outside the running API.
|
||||
|
||||
9. **Plugin API Documentation:** The plugin developer guide (`api/docs/developer/api-plugins.md`) was not publicly accessible outside the repository.
|
||||
|
||||
10. **Rate Limiting Details:** Only the default rate (100 req/10 sec) was found; per-endpoint or per-role rate limits were not documented.
|
||||
|
||||
---
|
||||
|
||||
## All URLs Discovered
|
||||
|
||||
### Primary Sources (Official Unraid Documentation)
|
||||
- [Welcome to Unraid API](https://docs.unraid.net/API/) - API landing page
|
||||
- [Using the Unraid API](https://docs.unraid.net/API/how-to-use-the-api/) - Usage guide with examples
|
||||
- [API Key Authorization Flow](https://docs.unraid.net/API/api-key-app-developer-authorization-flow/) - Third-party auth flow
|
||||
- [Programmatic API Key Management](https://docs.unraid.net/API/programmatic-api-key-management/) - CLI key management
|
||||
- [CLI Reference](https://docs.unraid.net/API/cli) - Full CLI command reference
|
||||
- [OIDC Provider Setup](https://docs.unraid.net/API/oidc-provider-setup/) - SSO configuration
|
||||
- [Unraid 7.2.0 Release Notes](https://docs.unraid.net/unraid-os/release-notes/7.2.0/) - Release notes
|
||||
- [Automated Flash Backup](https://docs.unraid.net/connect/flash-backup/) - Flash backup docs
|
||||
- [Unraid Connect Overview](https://docs.unraid.net/connect/) - Connect service
|
||||
- [Remote Access](https://docs.unraid.net/unraid-connect/remote-access/) - Remote access docs
|
||||
- [Unraid Connect Setup](https://docs.unraid.net/unraid-connect/overview-and-setup/) - Setup guide
|
||||
- [Arrays Overview](https://docs.unraid.net/unraid-os/using-unraid-to/manage-storage/array/overview/) - Storage management
|
||||
- [ZFS Storage](https://docs.unraid.net/unraid-os/advanced-configurations/optimize-storage/zfs-storage/) - ZFS guide
|
||||
- [SMART Reports](https://docs.unraid.net/unraid-os/system-administration/monitor-performance/smart-reports-and-disk-health/) - Disk health
|
||||
- [User Management](https://docs.unraid.net/unraid-os/system-administration/secure-your-server/user-management/) - User system
|
||||
- [Array Health](https://docs.unraid.net/unraid-os/using-unraid-to/manage-storage/array/array-health-and-maintenance/) - Parity/maintenance
|
||||
- [VM Management](https://docs.unraid.net/unraid-os/manual/vm/vm-management/) - VM setup guide
|
||||
- [Plugins](https://docs.unraid.net/unraid-os/using-unraid-to/customize-your-experience/plugins/) - Plugin overview
|
||||
|
||||
### Official Source Code
|
||||
- [unraid/api GitHub](https://github.com/unraid/api) - Official monorepo (86 stars)
|
||||
- [unraid/api CLAUDE.md](https://github.com/unraid/api/blob/main/CLAUDE.md) - Development guidelines
|
||||
- [unraid/libvirt GitHub](https://github.com/unraid/libvirt) - Libvirt bindings
|
||||
- [unraid/api Issues](https://github.com/unraid/api/issues) - Issue tracker
|
||||
- [Temperature Monitoring Bounty](https://github.com/unraid/api/issues/1597) - Feature bounty issue
|
||||
- [API Feature Bounty Program](https://unraid.net/blog/api-feature-bounty-program) - Program announcement
|
||||
- [Unraid Connect](https://unraid.net/connect) - Connect product page
|
||||
- [Connect Dashboard](https://connect.myunraid.net/) - Live Connect dashboard
|
||||
|
||||
### Architecture Analysis (DeepWiki)
|
||||
- [Unraid API Overview](https://deepwiki.com/unraid/api) - Full architecture
|
||||
- [Backend API System](https://deepwiki.com/unraid/api/2-api-server) - Backend details
|
||||
- [GraphQL API Layer](https://deepwiki.com/unraid/api/2.1-graphql-api-layer) - GraphQL implementation
|
||||
- [Authentication and Authorization](https://deepwiki.com/unraid/api/2.2-authentication-and-authorization) - Auth system
|
||||
- [Core Services](https://deepwiki.com/unraid/api/2.4-docker-integration) - Docker/services
|
||||
- [Docker Management Service](https://deepwiki.com/unraid/api/2.4.2-notification-system) - Docker details
|
||||
- [Configuration Files](https://deepwiki.com/unraid/api/5.2-connect-settings-and-remote-access) - Config system
|
||||
|
||||
### Community Client Libraries
|
||||
- [domalab/unraid-api-client GitHub](https://github.com/domalab/unraid-api-client) - Python client
|
||||
- [unraid-api PyPI](https://pypi.org/project/unraid-api/1.3.1/) - PyPI package
|
||||
- [Unraid API Documentation (DomaLab)](https://unraid-api.domalab.net/) - Python docs
|
||||
- [Network and Storage Queries](https://deepwiki.com/domalab/unraid-api-client/4.3-network-and-storage-queries) - Query reference
|
||||
- [chris-mc1/unraid_api GitHub](https://github.com/chris-mc1/unraid_api) - Home Assistant integration (60 stars)
|
||||
- [Homey Unraid API](https://homey.app/no-no/app/community.unraid.api/Unraid-API/) - Homey integration
|
||||
|
||||
### MCP Server Listings
|
||||
- [jmagar/unraid-mcp GitHub](https://github.com/jmagar/unraid-mcp) - This project
|
||||
- [Glama MCP Listing](https://glama.ai/mcp/servers/@jmagar/unraid-mcp) - Glama listing
|
||||
- [MCP Market Listing](https://mcpmarket.com/server/unraid) - MCP Market
|
||||
- [MCPServers.com Listing](https://mcpservers.com/servers/jmagar-unraid) - MCPServers.com
|
||||
- [LangDB Listing](https://langdb.ai/app/mcp-servers/unraid-mcp-server-8605b018-ce29-48d5-8132-48cf0792501f) - LangDB
|
||||
- [UBOS Listing](https://ubos.tech/mcp/unraid-mcp-server/) - UBOS
|
||||
- [JuheAPI Listing](https://www.juheapi.com/mcp-servers/jmagar/unraid-mcp) - JuheAPI
|
||||
- [AIBase Listing](https://mcp.aibase.com/server/1916341265568079874) - AIBase
|
||||
- [lwsinclair/unraid-mcp GitHub](https://github.com/lwsinclair/unraid-mcp) - Alternative MCP
|
||||
|
||||
### Alternative/Legacy APIs
|
||||
- [ruaan-deysel/unraid-management-agent](https://github.com/ruaan-deysel/unraid-management-agent) - Go REST+WebSocket (5 stars)
|
||||
- [BoKKeR/UnraidAPI-RE](https://github.com/BoKKeR/UnraidAPI-RE) - Node.js API (68 stars)
|
||||
- [ElectricBrainUK/UnraidAPI](https://github.com/ElectricBrainUK/UnraidAPI) - Original API (127 stars)
|
||||
- [ridenui/unraid](https://github.com/ridenui/unraid) - TypeScript SSH client (3 stars)
|
||||
|
||||
### Monitoring Integration
|
||||
- [Unraid Prometheus Guide](https://unraid.net/blog/prometheus) - Official guide
|
||||
- [Grafana UPS Dashboard](https://grafana.com/grafana/dashboards/19243-unraid-ups-monitoring/) - Dashboard 19243
|
||||
- [Grafana System Dashboard V2](https://grafana.com/grafana/dashboards/7233-unraid-system-dashboard-v2/) - Dashboard 7233
|
||||
- [Prometheus/Grafana Forum Thread](https://forums.unraid.net/topic/77593-monitoring-unraid-with-prometheus-grafana-cadvisor-nodeexporter-and-alertmanager/) - Community guide
|
||||
|
||||
### Community Discussion
|
||||
- [Webhook Notification Forum Thread](https://forums.unraid.net/topic/88464-webhook-notification-method/) - Notification customization
|
||||
- [Matrix Notification Agent](https://forums.unraid.net/topic/122107-matrix-notification-agent/) - Matrix integration
|
||||
- [ntfy.sh Notifications](https://lder.dev/posts/ntfy-Notifications-With-unRAID/) - ntfy.sh setup
|
||||
- [MCP HomeLab Tutorial (YouTube)](https://www.youtube.com/watch?v=AydDDYn09QA) - Christian Lempa MCP tutorial
|
||||
- [Build with the Unraid API (YouTube)](https://www.youtube.com/shorts/0JJQdFfh4e0) - Short video
|
||||
---
|
||||
# Unraid API Research Findings
|
||||
|
||||
**Date:** 2026-02-07
|
||||
**Research Topic:** Unraid GraphQL API, Connect Cloud Service, MCP Integration
|
||||
**Specialist:** NotebookLM Deep Research
|
||||
**Notebook ID:** 4e217d5d-d68b-4bfa-881a-42f7c01d3e44
|
||||
|
||||
## Research Summary
|
||||
|
||||
- **Deep research mode:** deep (47 web sources discovered)
|
||||
- **Sources indexed:** 51 ready / 77 total (26 error)
|
||||
- **Q&A questions asked:** 23 comprehensive questions with follow-ups
|
||||
- **Deep research status:** completed
|
||||
- **Key source categories:** Official Unraid docs, GitHub repos, community forums, GraphQL references, third-party integrations
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Unraid API Overview](#1-unraid-api-overview)
|
||||
2. [Architecture and Deployment](#2-architecture-and-deployment)
|
||||
3. [Authentication and Security](#3-authentication-and-security)
|
||||
4. [GraphQL Schema and Endpoints](#4-graphql-schema-and-endpoints)
|
||||
5. [WebSocket Subscriptions](#5-websocket-subscriptions)
|
||||
6. [Unraid Connect Cloud Service](#6-unraid-connect-cloud-service)
|
||||
7. [Version History and API Changes](#7-version-history-and-api-changes)
|
||||
8. [Community Integrations](#8-community-integrations)
|
||||
9. [Known Issues and Limitations](#9-known-issues-and-limitations)
|
||||
10. [API Roadmap and Future Features](#10-api-roadmap-and-future-features)
|
||||
11. [Recommendations for unraid-mcp](#11-recommendations-for-unraid-mcp)
|
||||
12. [Source Bibliography](#12-source-bibliography)
|
||||
|
||||
---
|
||||
|
||||
## 1. Unraid API Overview
|
||||
|
||||
The **Unraid API** is a programmatic interface that provides automation, monitoring, and integration capabilities for Unraid servers. It uses a **GraphQL** interface, offering a modern, strongly-typed method for developers and third-party applications to interact directly with the Unraid operating system.
|
||||
|
||||
### Key Facts
|
||||
|
||||
- **Protocol:** GraphQL (queries, mutations, subscriptions)
|
||||
- **Endpoint:** `http(s)://[SERVER_IP]/graphql`
|
||||
- **Authentication:** API Keys, Session Cookies, SSO/OIDC
|
||||
- **Native since:** Unraid 7.2 (no plugin required)
|
||||
- **Pre-7.2:** Requires Unraid Connect plugin installation
|
||||
|
||||
The API exposes nearly all management functions available in the Unraid WebGUI, including server management, storage operations, Docker/VM lifecycle, remote access, and backup capabilities.
|
||||
|
||||
**Sources:**
|
||||
- [Welcome to Unraid API | Unraid Docs](https://docs.unraid.net/API/) -- Official API landing page [Tier: Primary]
|
||||
- [Using the Unraid API](https://docs.unraid.net/API/how-to-use-the-api/) -- Official usage guide [Tier: Primary]
|
||||
|
||||
---
|
||||
|
||||
## 2. Architecture and Deployment
|
||||
|
||||
### Monorepo Structure
|
||||
|
||||
The Unraid API is developed in the [unraid/api](https://github.com/unraid/api) monorepo which houses:
|
||||
|
||||
| Directory | Purpose |
|-----------|---------|
| `api/` | GraphQL backend server (TypeScript/Node.js) |
| `web/` | Frontend interface (Nuxt/Vue.js) |
| `plugin/` | Unraid plugin packaging (.plg format) |
| `packages/` | Shared internal libraries |
| `unraid-ui/` | UI component library |
| `scripts/` | Build and maintenance utilities |
|
||||
### Technology Stack
|
||||
|
||||
| Component | Technology |
|-----------|------------|
| Primary language | TypeScript (77.4%) |
| Frontend | Vue.js (11.8%) via Nuxt |
| Runtime | Node.js v22 |
| Package manager | pnpm v9.0+ |
| API protocol | GraphQL |
| Dev environment | Nix (optional), Docker |
| Build tool | Justfile |
||||
|
||||
### Deployment Modes
|
||||
|
||||
1. **Native (Unraid 7.2+):** API is built into the OS, starts automatically with the system. Managed via **Settings > Management Access > API**.
|
||||
2. **Plugin (Pre-7.2):** Requires installing the Unraid Connect plugin from Community Applications. Installing the plugin on 7.2+ provides access to newer API features before they are merged into the stable OS release.
|
||||
3. **Development:** Supports local Docker builds (`pnpm run docker:build-and-run` on port 5858), direct deployment to a running server (`pnpm unraid:deploy <SERVER_IP>`), and hot-reloading dev servers (API port 3001, Web port 3000).
|
||||
|
||||
### Integration with Nginx
|
||||
|
||||
The API integrates with Unraid's Nginx web server. Nginx acts as a reverse proxy, handling external requests on standard web ports (80/443) and routing `/graphql` traffic to the internal API backend. This means the API shares the same IP and port as the WebGUI.
|
||||
|
||||
**Sources:**
|
||||
- [GitHub - unraid/api: Unraid API / Connect / UI Monorepo](https://github.com/unraid/api) [Tier: Official]
|
||||
- [api/api/docs/developer/development.md](https://github.com/unraid/api/blob/main/api/docs/developer/development.md) [Tier: Official]
|
||||
|
||||
---
|
||||
|
||||
## 3. Authentication and Security
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
The Unraid API supports three primary authentication mechanisms:
|
||||
|
||||
1. **API Keys** -- Standard method for programmatic access
|
||||
- Created via WebGUI: **Settings > Management Access > API Keys**
|
||||
- Created via CLI: `unraid-api apikey --create --name "mykey" --roles ADMIN --json`
|
||||
- Sent in HTTP header: `x-api-key: YOUR_API_KEY`
|
||||
- Displayed only once upon creation
|
||||
|
||||
2. **Session Cookies** -- Used for browser-based WebGUI access
|
||||
- Automatic when logged into WebGUI
|
||||
- Used internally by the GraphQL Sandbox
|
||||
|
||||
3. **SSO / OIDC (OpenID Connect)** -- Enterprise identity management
|
||||
- Added in API v4.0.0
|
||||
- Supports external identity providers
|
||||
|
||||
### API Key Authorization Flow (OAuth-like)
|
||||
|
||||
For third-party applications, Unraid provides an OAuth-like authorization flow:
|
||||
|
||||
1. App redirects user to: `https://[server]/ApiKeyAuthorize?name=MyApp&scopes=docker:read,vm:*&redirect_uri=https://myapp.com/callback&state=abc123`
|
||||
2. User authenticates (if not already logged in)
|
||||
3. User sees consent screen with requested permissions
|
||||
4. Upon approval, API key is created and shown to user once
|
||||
5. If `redirect_uri` provided, user is redirected with `?api_key=xxx&state=abc123`
|
||||
|
||||
**Query Parameters:**
|
||||
| Parameter | Required | Description |
|-----------|----------|-------------|
| `name` | Yes | Application name |
| `scopes` | Yes | Comma-separated permissions (e.g., `docker:read,vm:*`) |
| `redirect_uri` | No | HTTPS callback URL (localhost allowed for dev) |
| `state` | No | CSRF prevention token |
|
||||
### Programmatic API Key Management (CLI)
|
||||
|
||||
```bash
# Create a key with admin role
unraid-api apikey --create --name "workflow key" --roles ADMIN --json

# Create a key with specific permissions
unraid-api apikey --create --name "monitor" --permissions "DOCKER:READ_ANY,ARRAY:READ_ANY" --json

# Overwrite existing key
unraid-api apikey --create --name "workflow key" --roles ADMIN --overwrite --json

# Delete a key
unraid-api apikey --delete --name "workflow key"
```
|
||||
|
||||
### Roles and Permissions
|
||||
|
||||
**Roles (pre-defined access levels):**
|
||||
|
||||
| Role | Description |
|------|-------------|
| `ADMIN` | Full system access (all permissions) |
| `VIEWER` | Read-only access |
| `GUEST` | Limited access |
| `CONNECT` | Unraid Connect cloud features |
||||
|
||||
**Permission Scope Format:** `RESOURCE:ACTION`
|
||||
|
||||
**Available Resources:**
|
||||
- Core: `ACTIVATION_CODE`, `API_KEY`, `CONFIG`, `CUSTOMIZATIONS`, `INFO`, `LOGS`, `OS`, `REGISTRATION`, `VARS`, `WELCOME`
|
||||
- Storage: `ARRAY`, `DISK`, `FLASH`
|
||||
- Services: `DOCKER`, `VMS`, `SERVICES`, `NETWORK`
|
||||
- Management: `DASHBOARD`, `DISPLAY`, `ME`, `NOTIFICATIONS`, `OWNER`, `PERMISSION`, `SHARE`, `USER`
|
||||
- Cloud: `CLOUD`, `CONNECT`, `CONNECT__REMOTE_ACCESS`, `ONLINE`, `SERVERS`
|
||||
|
||||
**Available Actions:**
|
||||
- `CREATE_ANY`, `CREATE_OWN`
|
||||
- `READ_ANY`, `READ_OWN`
|
||||
- `UPDATE_ANY`, `UPDATE_OWN`
|
||||
- `DELETE_ANY`, `DELETE_OWN`
|
||||
- `*` (wildcard for all actions)
|
||||
|
||||
### SSL/TLS Certificate Handling
|
||||
|
||||
| Scenario | Recommendation |
|
||||
|----------|---------------|
|
||||
| Self-signed cert (local IP) | Either trust the specific CA or disable SSL verification (dev only) |
|
||||
| `myunraid.net` cert (Let's Encrypt) | SSL verification works normally; use the `myunraid.net` URL |
|
||||
| Strict SSL mode | Enforces HTTPS for all connections including local |
|
||||
|
||||
For self-signed certs in client code:
|
||||
```bash
|
||||
curl -k "https://your-unraid-server/graphql" -H "x-api-key: YOUR_KEY"
|
||||
```
|
||||
|
||||
**Sources:**
|
||||
- [API key authorization flow | Unraid Docs](https://docs.unraid.net/API/api-key-app-developer-authorization-flow/) [Tier: Primary]
|
||||
- [Programmatic API key management | Unraid Docs](https://docs.unraid.net/API/programmatic-api-key-management/) [Tier: Primary]
|
||||
|
||||
---
|
||||
|
||||
## 4. GraphQL Schema and Endpoints
|
||||
|
||||
### Endpoint URLs
|
||||
|
||||
| Purpose | URL |
|
||||
|---------|-----|
|
||||
| GraphQL API | `http(s)://[SERVER_IP]/graphql` |
|
||||
| GraphQL Sandbox | `http(s)://[SERVER_IP]/graphql` (must be enabled) |
|
||||
| WebSocket (subscriptions) | `ws(s)://[SERVER_IP]/graphql` |
|
||||
| Internal dev API | `http://localhost:3001/graphql` |
|
||||
|
||||
### Enabling the GraphQL Sandbox
|
||||
|
||||
Two methods:
|
||||
1. **WebGUI:** Settings > Management Access > Developer Options > Toggle GraphQL Sandbox to "On"
|
||||
2. **CLI:** `unraid-api developer --sandbox true`
|
||||
|
||||
Then access at `http://YOUR_SERVER_IP/graphql` to explore the schema via Apollo Sandbox.
|
||||
|
||||
### Query Types
|
||||
|
||||
#### System Information (`info`)
|
||||
```graphql
|
||||
query {
|
||||
info {
|
||||
os { platform distro release uptime hostname arch kernel }
|
||||
cpu { manufacturer brand cores threads }
|
||||
memory { layout { bank type clockSpeed manufacturer } }
|
||||
baseboard { manufacturer model version serial }
|
||||
system { manufacturer model version serial uuid }
|
||||
versions { kernel docker unraid node }
|
||||
apps { installed started }
|
||||
machineId
|
||||
time
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Array Status (`array`)
|
||||
```graphql
|
||||
query {
|
||||
array {
|
||||
id
|
||||
state
|
||||
capacity {
|
||||
kilobytes { free used total }
|
||||
disks { free used total }
|
||||
}
|
||||
boot { id name device size status temp fsType }
|
||||
parities { id name device size status temp numErrors }
|
||||
disks { id name device size status temp numReads numWrites numErrors }
|
||||
caches { id name device size status temp }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Docker Containers (`docker`)
|
||||
```graphql
|
||||
query {
|
||||
docker {
|
||||
containers(skipCache: false) {
|
||||
id names image state status autoStart
|
||||
ports { ip privatePort publicPort type }
|
||||
labels
|
||||
networkSettings
|
||||
mounts
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Virtual Machines (`vms`)
|
||||
```graphql
|
||||
query {
|
||||
vms {
|
||||
id
|
||||
domains {
|
||||
id name state uuid
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Network (`network`)
|
||||
```graphql
|
||||
query {
|
||||
network {
|
||||
id
|
||||
accessUrls { type name ipv4 ipv6 }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Registration (`registration`)
|
||||
```graphql
|
||||
query {
|
||||
registration {
|
||||
id type state expiration updateExpiration
|
||||
keyFile { location contents }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Settings (`settings`)
|
||||
```graphql
|
||||
query {
|
||||
settings {
|
||||
unified { values }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### System Variables (`vars`)
|
||||
```graphql
|
||||
query {
|
||||
vars {
|
||||
id version name timeZone security workgroup
|
||||
useSsl port portssl
|
||||
shareSmbEnabled shareNfsEnabled
|
||||
mdState mdVersion
|
||||
csrfToken
|
||||
# Many more fields available -- some have Int overflow issues
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### RClone Remotes (`rclone`)
|
||||
|
||||
```graphql
|
||||
query {
|
||||
rclone {
|
||||
remotes { name type parameters config }
|
||||
configForm(formOptions: { providerType: "s3" }) {
|
||||
id dataSchema uiSchema
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Notifications
|
||||
|
||||
```graphql
|
||||
query {
|
||||
notifications {
|
||||
id subject message importance unread
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Shares
|
||||
|
||||
```graphql
|
||||
query {
|
||||
shares {
|
||||
name comment free used
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Mutation Types
|
||||
|
||||
#### Docker Container Management
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
docker {
|
||||
start(id: $id) { id names state status }
|
||||
stop(id: $id) { id names state status }
|
||||
}
|
||||
}
|
||||
```
|
||||
- Uses `PrefixedID` type for container identification
|
||||
- Mutations are idempotent (starting an already-running container returns success)
|
||||
|
||||
#### VM Management
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
vm {
|
||||
start(id: $id) # Returns Boolean
|
||||
stop(id: $id)
|
||||
pause(id: $id)
|
||||
resume(id: $id)
|
||||
forceStop(id: $id)
|
||||
reboot(id: $id)
|
||||
reset(id: $id)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### RClone Remote Management
|
||||
```graphql
|
||||
mutation {
|
||||
rclone {
|
||||
createRCloneRemote(input: { name: "...", type: "s3", config: {...} }) {
|
||||
name type parameters
|
||||
}
|
||||
deleteRCloneRemote(input: { name: "..." })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### System Operations (via API)
|
||||
The following operations are confirmed available through the API (exact mutation names should be discovered via introspection):
|
||||
- Array start/stop
|
||||
- Parity check trigger
|
||||
- Server reboot/shutdown
|
||||
- Flash backup trigger
|
||||
- Notification management
|
||||
|
||||
### The `PrefixedID` Type
|
||||
|
||||
The API uses a `PrefixedID` custom scalar type for global object identification. This follows the GraphQL `Node` interface pattern, combining the object type and its internal ID (e.g., `DockerContainer:abc123`). Client libraries must handle this as a string.
|
||||
|
||||
### The `Long` Scalar Type
|
||||
|
||||
The API defines a custom `Long` scalar type for 64-bit integers to handle values that exceed the standard GraphQL `Int` (32-bit signed). This is used for:
|
||||
- Disk/array capacity values (size, free, used, total)
|
||||
- Memory values (total, free)
|
||||
- Disk operation counters (numReads, numWrites)
|
||||
|
||||
These are typically serialized as strings in JSON responses.
|
||||
|
||||
**Sources:**
|
||||
- [Welcome to Unraid API | Unraid Docs](https://docs.unraid.net/API/) [Tier: Primary]
|
||||
- [Using the Unraid API](https://docs.unraid.net/API/how-to-use-the-api/) [Tier: Primary]
|
||||
- [GitHub - jmagar/unraid-mcp](https://github.com/jmagar/unraid-mcp) [Tier: Official]
|
||||
|
||||
---
|
||||
|
||||
## 5. WebSocket Subscriptions
|
||||
|
||||
### Protocol
|
||||
|
||||
The Unraid API uses the **`graphql-transport-ws`** protocol (the modern standard, superseding the older `subscriptions-transport-ws`).
|
||||
|
||||
### Connection Flow
|
||||
|
||||
1. Client connects to `ws(s)://[SERVER_IP]/graphql`
|
||||
2. Client sends `connection_init` with auth payload:
|
||||
```json
|
||||
{
|
||||
"type": "connection_init",
|
||||
"payload": {
|
||||
"x-api-key": "YOUR_API_KEY"
|
||||
}
|
||||
}
|
||||
```
|
||||
3. Server responds with `connection_ack`
|
||||
4. Client sends `subscribe` message with GraphQL subscription query
|
||||
5. Server streams `next` messages with data as events occur
|
||||
6. Server sends `complete` when subscription ends
|
||||
|
||||
### Known Subscription Types
|
||||
|
||||
| Subscription | Purpose |
|
||||
|-------------|---------|
|
||||
| `syslog` / `logFile` | Real-time system log streaming |
|
||||
| Array events | State changes, parity check progress |
|
||||
| Docker events | Container state changes |
|
||||
| Notifications | Real-time alert streaming |
|
||||
|
||||
### Authentication for WebSockets
|
||||
|
||||
Since standard WebSocket APIs in browsers cannot set custom headers, the API key is passed in the `connectionParams` payload of the `connection_init` message. Alternatively, session cookies work automatically for WebGUI-based tools.
|
||||
|
||||
### Infrastructure Notes
|
||||
|
||||
- Unraid uses **Nchan** (Nginx module) for WebSocket connections internally
|
||||
- Unraid 7.0.1 fixed Nchan memory leaks affecting subscription stability
|
||||
- Unraid 7.1.0 added automatic Nchan shared memory recovery (restarts Nginx when memory runs out)
|
||||
- A setting was added in 7.1.0 to disable real-time updates on inactive browsers to prevent memory issues
|
||||
|
||||
**Sources:**
|
||||
- [Subscriptions - GraphQL](https://graphql.org/learn/subscriptions/) [Tier: Primary]
|
||||
- [Subscriptions - Apollo GraphQL Docs](https://www.apollographql.com/docs/react/data/subscriptions) [Tier: Official]
|
||||
|
||||
---
|
||||
|
||||
## 6. Unraid Connect Cloud Service
|
||||
|
||||
### Overview
|
||||
|
||||
**Unraid Connect** is a cloud-enabled companion service that functions as a centralized "remote command center" for Unraid servers. It provides:
|
||||
|
||||
- **Centralized Dashboard:** View status, uptime, storage, and license details for multiple servers
|
||||
- **Remote Management:** Start/stop arrays, manage Docker/VMs, reboot servers
|
||||
- **Flash Backup:** Automated cloud-based backups of USB flash drive configuration
|
||||
- **Deep Linking:** Jump directly from cloud dashboard to local WebGUI pages
|
||||
|
||||
### Relationship to Local API
|
||||
|
||||
- Pre-7.2: The Unraid Connect plugin provides both cloud features AND the local GraphQL API
|
||||
- Post-7.2: The API is native to the OS; the Connect plugin adds cloud features
|
||||
- The cloud dashboard communicates through a secure tunnel to execute commands locally
|
||||
|
||||
### Data Transmitted to Cloud
|
||||
|
||||
The local server transmits to `Unraid.net`:
|
||||
- Server hostname and keyfile details
|
||||
- Local/remote IP addresses
|
||||
- Array usage statistics (numbers only, no file names)
|
||||
- Container and VM counts
|
||||
|
||||
**Privacy:** The service explicitly does NOT collect or share user content, file details, or personal information beyond necessary metadata.
|
||||
|
||||
### Remote Access Mechanisms
|
||||
|
||||
1. **Dynamic Remote Access (Recommended):**
|
||||
- On-demand; WebGUI closed to internet by default
|
||||
- Uses UPnP for automatic port forwarding (or manual rules)
|
||||
- Port lease expires after inactivity (~10 minutes)
|
||||
|
||||
2. **Static Remote Access:**
|
||||
- Always-on; WebGUI continuously accessible
|
||||
- Requires forwarding WAN port (high random number >1000) to HTTPS port
|
||||
|
||||
3. **VPN Alternatives:**
|
||||
- WireGuard (built-in)
|
||||
- Tailscale (native since Unraid 7.0+)
|
||||
|
||||
### Flash Backup Details
|
||||
|
||||
- Configuration files are encrypted and uploaded
|
||||
- Excludes sensitive data: passwords, WireGuard keys
|
||||
- Retained as latest backup only; older/inactive backups are purged
|
||||
- Can be triggered and monitored via the API
|
||||
|
||||
**Sources:**
|
||||
- [Unraid Connect overview & setup | Unraid Docs](https://docs.unraid.net/connect/about/) [Tier: Primary]
|
||||
- [Remote access | Unraid Docs](https://docs.unraid.net/connect/remote-access/) [Tier: Primary]
|
||||
- [Automated flash backup | Unraid Docs](https://docs.unraid.net/connect/flash-backup/) [Tier: Primary]
|
||||
|
||||
---
|
||||
|
||||
## 7. Version History and API Changes
|
||||
|
||||
### Unraid 7.0.0 (2025-01-09)
|
||||
|
||||
**Developer & System Capabilities:**
|
||||
- Notification agents stored as individual XML files (easier programmatic management)
|
||||
- `Content-Security-Policy frame-ancestors` support (iframe embedding for dashboards)
|
||||
- JavaScript console logging restored
|
||||
- VM Manager inline XML mode (read-only libvirt XML view)
|
||||
- Docker PID limits (default 2048)
|
||||
- Full ZFS support (hybrid pools, subpools, encryption)
|
||||
- Native Tailscale integration
|
||||
- File Manager merged into core OS
|
||||
- QEMU snapshots and clones for VMs
|
||||
|
||||
**Note:** API was still plugin-based (Unraid Connect plugin required).
|
||||
|
||||
### Unraid 7.0.1 (2025-02-25)
|
||||
|
||||
- **Nchan memory leak fix** -- Critical for WebSocket subscription stability
|
||||
- Tailscale integration security restrictions for Host-network containers
|
||||
|
||||
### Unraid 7.1.0 (2025-05-05)
|
||||
|
||||
- **Nchan shared memory recovery** -- Automatic Nginx restart on memory exhaustion
|
||||
- **Real-time updates toggle** -- Disable updates on inactive browsers
|
||||
- Native WiFi support (`wlan0`) -- New network interface data
|
||||
- User VM templates (create, export, import)
|
||||
- CSS rework for WebGUI
|
||||
|
||||
### Unraid 7.2.0 (Stable Release)
|
||||
|
||||
**Major Milestone: API becomes native to the OS.**
|
||||
|
||||
- No plugin required for local API access
|
||||
- API starts automatically with system
|
||||
- Deep system integration
|
||||
- Settings accessible at **Settings > Management Access > API**
|
||||
- OIDC/SSO support added
|
||||
- Permissions system rewritten (API v4.0.0)
|
||||
- Built-in GraphQL Sandbox
|
||||
- CLI key management (`unraid-api apikey`)
|
||||
- Open-sourced API code
|
||||
|
||||
**Sources:**
|
||||
- [Version 7.0.0 | Unraid Docs](https://docs.unraid.net/unraid-os/release-notes/7.0.0/) [Tier: Primary]
|
||||
- [Version 7.0.1 | Unraid Docs](https://docs.unraid.net/unraid-os/release-notes/7.0.1/) [Tier: Primary]
|
||||
- [Version 7.1.0 | Unraid Docs](https://docs.unraid.net/unraid-os/release-notes/7.1.0/) [Tier: Primary]
|
||||
- [Unraid 7.2.0 Blog Post](https://unraid.net/blog/unraid-7-2-0) [Tier: Official]
|
||||
|
||||
---
|
||||
|
||||
## 8. Community Integrations
|
||||
|
||||
### Third-Party Projects Using the Unraid API
|
||||
|
||||
#### 1. unraid-mcp (Python MCP Server) -- This Project
|
||||
- **Interface:** Official Unraid GraphQL API via HTTP/HTTPS + WebSockets
|
||||
- **Auth:** `UNRAID_API_URL` + `UNRAID_API_KEY` environment variables
|
||||
- **Transport:** HTTP header `X-API-Key` for queries; WebSocket `connection_init` payload for subscriptions
|
||||
- **Tools:** 26+ MCP tools for Docker, VM, storage, system management
|
||||
|
||||
#### 2. PSUnraid (PowerShell Module)
|
||||
- **Developer:** Community member "Jagula"
|
||||
- **Status:** Alpha / proof of concept
|
||||
- **Interface:** Official Unraid GraphQL API
|
||||
- **Install:** `Install-Module PSUnraid`
|
||||
- **Capabilities:** Server/array/disk status, Docker/VM start/stop/restart, notifications
|
||||
- **Requires:** Unraid 7.2.2+ for full feature support
|
||||
- **Key insight:** Remote-only (no SSH needed), converts JSON to PowerShell objects
|
||||
|
||||
#### 3. unraid-management-agent (Go Plugin)
|
||||
- **Interface:** **NOT** the official GraphQL API -- independent REST API + WebSocket
|
||||
- **Port:** Default 8043
|
||||
- **Architecture:** Standalone Go binary, collects data via native libraries
|
||||
- **Endpoints:** 50+ REST endpoints, `/metrics` for Prometheus, WebSocket at `/api/v1/ws`
|
||||
- **Integrations:** Prometheus (41 metrics), MQTT, Home Assistant (auto-discovery), MCP (54 tools)
|
||||
- **Key insight:** Provides data the official API lacks (SMART data, container logs, process monitoring, GPU stats, UPS data)
|
||||
|
||||
#### 4. unraid-ssh-mcp
|
||||
- **Interface:** SSH (explicitly chose NOT to use GraphQL API)
|
||||
- **Reason:** API lacked container logs, SMART data, real-time CPU load, process monitoring
|
||||
- **Advantage:** Works on any Unraid version, no rate limits
|
||||
|
||||
#### Other Projects
|
||||
- **U-Manager:** Android app for remote Unraid management
|
||||
- **Unraid Deck:** Native iOS client (SwiftUI)
|
||||
- **hass-unraid:** Home Assistant integration with SMART attribute notifications
|
||||
|
||||
**Sources:**
|
||||
- [PSUnraid Reddit Thread](https://www.reddit.com/r/unRAID/comments/1ph08wi/) [Tier: Community]
|
||||
- [unraid-management-agent GitHub](https://github.com/ruaan-deysel/unraid-management-agent) [Tier: Official]
|
||||
- [Unraid MCP Reddit Thread](https://www.reddit.com/r/unRAID/comments/1pl4s4j/) [Tier: Community]
|
||||
|
||||
---
|
||||
|
||||
## 9. Known Issues and Limitations
|
||||
|
||||
### GraphQL Schema Issues (Discovered in unraid-mcp Development)
|
||||
|
||||
Based on the existing unraid-mcp codebase, the following issues have been encountered:
|
||||
|
||||
1. **Int Overflow on Large Values:** Memory size fields (total, used, free) and some disk operation counters can overflow GraphQL's standard 32-bit `Int` type. The API uses a custom `Long` scalar but some fields still return problematic values.
|
||||
|
||||
2. **NaN Values:** Certain fields in the `vars` query (e.g., `sysArraySlots`, `sysCacheSlots`, `cacheNumDevices`, `cacheSbNumDisks`) can return NaN, causing type errors. The existing codebase works around this by querying a curated subset of fields.
|
||||
|
||||
3. **Non-nullable Fields Returning Null:** The `info.devices` section has non-nullable fields that may be null in practice. The codebase avoids querying this section entirely.
|
||||
|
||||
4. **Memory Layout Size Missing:** Individual memory stick `size` values are not returned by the API, preventing total memory calculation from layout data.
|
||||
|
||||
### API Coverage Gaps
|
||||
|
||||
According to the unraid-ssh-mcp developer, the GraphQL API currently lacks:
|
||||
- Docker container logs
|
||||
- Detailed SMART data for drives
|
||||
- Real-time CPU load averages
|
||||
- Process monitoring capabilities
|
||||
- Some system-level metrics available via `/proc` and `/sys`
|
||||
|
||||
### General Limitations
|
||||
|
||||
- **Rate Limiting:** The API implements rate limiting (specific limits not documented publicly)
|
||||
- **Version Dependency:** Full API requires Unraid 7.2+; pre-7.2 versions need the Connect plugin
|
||||
- **Self-Signed Certificates:** Client must handle SSL verification for local IP access
|
||||
- **Schema Volatility:** The API schema is still evolving; field names and types may change between versions
|
||||
|
||||
---
|
||||
|
||||
## 10. API Roadmap and Future Features
|
||||
|
||||
### Completed (as of 7.2)
|
||||
- API native to Unraid OS
|
||||
- Separated from Connect Plugin
|
||||
- Open-sourced
|
||||
- OIDC/SSO support
|
||||
- Permissions system rewrite (v4.0.0)
|
||||
|
||||
### Q1 2025
|
||||
- New Connect Settings Interface
|
||||
|
||||
### Q2 2025
|
||||
- New modernized Settings Pages
|
||||
- Storage Pool Creation Interface (simplified)
|
||||
- Storage Pool Status Interface (real-time)
|
||||
- Developer Tools for Plugins
|
||||
- Custom Theme Creator (start)
|
||||
|
||||
### Q3 2025
|
||||
- Custom Theme Creator (completion)
|
||||
- New Docker Status Interface
|
||||
- Docker Container Setup Interface (streamlined)
|
||||
- New Plugins Interface (redesigned)
|
||||
|
||||
### TBD (Planned but Unscheduled)
|
||||
- **Native Docker Compose support** -- Highly anticipated
|
||||
- Plugin Development SDK and tooling
|
||||
- Advanced Plugin Management interface
|
||||
- Storage Share Creation & Settings interfaces
|
||||
- Storage Share Management Dashboard
|
||||
|
||||
### In Development
|
||||
- User Interface Component Library (security components)
|
||||
|
||||
**Sources:**
|
||||
- [Roadmap & Features | Unraid Docs](https://docs.unraid.net/API/upcoming-features/) [Tier: Primary]
|
||||
|
||||
---
|
||||
|
||||
## 11. Recommendations for unraid-mcp
|
||||
|
||||
Based on this research, the following improvements are recommended for the unraid-mcp project:
|
||||
|
||||
### High Priority
|
||||
|
||||
1. **ZFS/Pool Management Tools**
|
||||
- Add `get_pool_status` for ZFS/BTRFS storage pools
|
||||
- Current `get_array_status` insufficient for multi-pool setups introduced in Unraid 7.0
|
||||
|
||||
2. **Scope-Based Tool Filtering**
|
||||
- Before registering tools with MCP, verify the API key has appropriate permissions
|
||||
- Prevent exposing tools the key cannot use (avoid hallucinated capabilities)
|
||||
- Query current key permissions at startup
|
||||
|
||||
3. **Improved Error Handling**
|
||||
- Implement exponential backoff for rate limit errors (HTTP 429)
|
||||
- Better handling of `Long` scalar type values
|
||||
- Graceful degradation for unavailable schema fields
|
||||
|
||||
4. **API Key Authorization Flow**
|
||||
- Consider implementing the OAuth-like flow (`/ApiKeyAuthorize`) for user-friendly key generation
|
||||
- Enables scope-based consent before key creation
|
||||
|
||||
### Medium Priority
|
||||
|
||||
5. **Real-Time Notification Streaming**
|
||||
- Add WebSocket subscription for notifications
|
||||
- Allows proactive alerting (e.g., "Disk 5 is overheating") without user request
|
||||
|
||||
6. **File Manager Integration**
|
||||
- Add `list_files`, `read_file` tools using the native File Manager API (merged in 7.0)
|
||||
- Enables LLM to organize media or clean up `appdata`
|
||||
|
||||
7. **Pagination for Large Queries**
|
||||
- Implement `limit` and `offset` for log listings and file browsing
|
||||
- Prevent timeouts from massive result sets
|
||||
|
||||
8. **Flash Backup Trigger**
|
||||
- Add tool to trigger flash backup via API mutation
|
||||
- Monitor backup status
|
||||
|
||||
### Low Priority
|
||||
|
||||
9. **VM Snapshot Management**
|
||||
- Add `create_vm_snapshot`, `revert_to_snapshot`, `clone_vm`
|
||||
- Leverages QEMU snapshot support from Unraid 7.0
|
||||
|
||||
10. **Tailscale/VPN Status**
|
||||
- Query network schemas for Tailnet IPs and VPN connection status
|
||||
- Useful for remote management diagnostics
|
||||
|
||||
11. **Query Complexity Optimization**
|
||||
- Separate list queries (lightweight) from detail queries (heavy)
|
||||
- `list_docker_containers` should fetch only id/names/state
|
||||
- Detail queries should be on-demand
|
||||
|
||||
### Implementation Notes
|
||||
|
||||
- **GraphQL Sandbox Discovery:** Use the built-in sandbox at `http://SERVER/graphql` to discover exact mutation names and field types for any new tools
|
||||
- **Version Compatibility:** Consider checking the Unraid API version at startup and adjusting available tools accordingly
|
||||
- **SSL Configuration:** The `UNRAID_VERIFY_SSL` environment variable is already implemented -- ensure documentation guides users toward `myunraid.net` certificates for proper SSL
|
||||
- **PrefixedID Handling:** Container and VM IDs use the `PrefixedID` custom scalar -- ensure all ID-based operations handle this string type correctly
|
||||
|
||||
---
|
||||
|
||||
## 12. Source Bibliography
|
||||
|
||||
### Primary Sources (Official Documentation)
|
||||
- [Welcome to Unraid API | Unraid Docs](https://docs.unraid.net/API/)
|
||||
- [Using the Unraid API](https://docs.unraid.net/API/how-to-use-the-api/)
|
||||
- [API key authorization flow | Unraid Docs](https://docs.unraid.net/API/api-key-app-developer-authorization-flow/)
|
||||
- [Programmatic API key management | Unraid Docs](https://docs.unraid.net/API/programmatic-api-key-management/)
|
||||
- [Roadmap & Features | Unraid Docs](https://docs.unraid.net/API/upcoming-features/)
|
||||
- [Unraid Connect overview & setup | Unraid Docs](https://docs.unraid.net/connect/about/)
|
||||
- [Remote access | Unraid Docs](https://docs.unraid.net/connect/remote-access/)
|
||||
- [Automated flash backup | Unraid Docs](https://docs.unraid.net/connect/flash-backup/)
|
||||
- [Version 7.0.0 Release Notes](https://docs.unraid.net/unraid-os/release-notes/7.0.0/)
|
||||
- [Version 7.0.1 Release Notes](https://docs.unraid.net/unraid-os/release-notes/7.0.1/)
|
||||
- [Version 7.1.0 Release Notes](https://docs.unraid.net/unraid-os/release-notes/7.1.0/)
|
||||
|
||||
### Official / GitHub Sources
|
||||
- [GitHub - unraid/api: Unraid API / Connect / UI Monorepo](https://github.com/unraid/api)
|
||||
- [GitHub - jmagar/unraid-mcp](https://github.com/jmagar/unraid-mcp)
|
||||
- [api/docs/developer/development.md](https://github.com/unraid/api/blob/main/api/docs/developer/development.md)
|
||||
- [Unraid OS 7.2.0 Blog Post](https://unraid.net/blog/unraid-7-2-0)
|
||||
|
||||
### Community Sources
|
||||
- [PSUnraid PowerShell Module (Reddit)](https://www.reddit.com/r/unRAID/comments/1ph08wi/)
|
||||
- [Unraid MCP Server (Reddit)](https://www.reddit.com/r/unRAID/comments/1pl4s4j/)
|
||||
- [unraid-management-agent (GitHub)](https://github.com/ruaan-deysel/unraid-management-agent)
|
||||
- [Unraid API Discussion (Reddit)](https://www.reddit.com/r/unRAID/comments/1h7xkjr/unraid_api/)
|
||||
- [API Key Location Question (Reddit)](https://www.reddit.com/r/unRAID/comments/1nk2jjk/)
|
||||
|
||||
### Reference Sources
|
||||
- [GraphQL Specification](https://spec.graphql.org/)
|
||||
- [Learn GraphQL](https://graphql.org/learn/)
|
||||
- [GraphQL Subscriptions](https://graphql.org/learn/subscriptions/)
|
||||
- [Apollo GraphQL Sandbox](https://www.apollographql.com/docs/graphos/platform/sandbox)
|
||||
- [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction)
|
||||
|
||||
---
|
||||
|
||||
## Cross-Source Analysis
|
||||
|
||||
### Where Sources Agree
|
||||
- The API is GraphQL-based with queries, mutations, and subscriptions
|
||||
- Unraid 7.2 is the version where API became native
|
||||
- API Keys are the primary authentication method for programmatic access
|
||||
- The endpoint is at `/graphql` on the server
|
||||
- The API supports Docker/VM lifecycle management
|
||||
- The monorepo is TypeScript/Node.js based
|
||||
|
||||
### Where Sources Disagree or Have Gaps
|
||||
- **Exact mutation names** are not documented publicly -- must use GraphQL Sandbox introspection
|
||||
- **Rate limit specifics** (thresholds, headers) are not publicly documented
|
||||
- **Container logs** -- the unraid-ssh-mcp developer claims they're unavailable via API, but this may have changed in newer versions
|
||||
- **Schema type issues** (Int overflow, NaN) are documented only in the unraid-mcp codebase, not in official docs
|
||||
|
||||
### Notable Insights
|
||||
1. The unraid-management-agent project provides capabilities the official API lacks, suggesting areas for API expansion
|
||||
2. PSUnraid confirms the API schema includes mutations for Docker/VM lifecycle with boolean return types
|
||||
3. The OAuth-like authorization flow is a sophisticated feature not commonly found in self-hosted server APIs
|
||||
4. The `Long` scalar type and `PrefixedID` type are custom additions critical for proper client implementation
|
||||
---
|
||||
# Unraid API Source Code Analysis
|
||||
|
||||
> **Research Date:** 2026-02-07
|
||||
> **Repository:** https://github.com/unraid/api
|
||||
> **Latest Version:** v4.29.2 (December 19, 2025)
|
||||
> **License:** Open-sourced January 2025
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Repository Structure](#1-repository-structure)
|
||||
2. [Technology Stack](#2-technology-stack)
|
||||
3. [GraphQL Schema & Type System](#3-graphql-schema--type-system)
|
||||
4. [Authentication & Authorization](#4-authentication--authorization)
|
||||
5. [Resolver Implementations](#5-resolver-implementations)
|
||||
6. [Subscription System](#6-subscription-system)
|
||||
7. [State Management](#7-state-management)
|
||||
8. [Plugin Architecture](#8-plugin-architecture)
|
||||
9. [Release History](#9-release-history)
|
||||
10. [Roadmap & Upcoming Features](#10-roadmap--upcoming-features)
|
||||
11. [Open Issues & Community Requests](#11-open-issues--community-requests)
|
||||
12. [Community Projects & Integrations](#12-community-projects--integrations)
|
||||
13. [Architectural Insights for unraid-mcp](#13-architectural-insights-for-unraid-mcp)
|
||||
|
||||
---
|
||||
|
||||
## 1. Repository Structure
|
||||
|
||||
The Unraid API is a **monorepo** managed with pnpm workspaces containing eight interconnected packages:
|
||||
|
||||
```
|
||||
unraid/api/
|
||||
├── api/ # NestJS GraphQL backend (port 3001)
|
||||
│ ├── src/
|
||||
│ │ ├── __test__/
|
||||
│ │ ├── common/ # Shared utilities
|
||||
│ │ ├── core/ # Core infrastructure
|
||||
│ │ │ ├── errors/
|
||||
│ │ │ ├── modules/
|
||||
│ │ │ ├── notifiers/
|
||||
│ │ │ ├── types/
|
||||
│ │ │ ├── utils/
|
||||
│ │ │ ├── log.ts
|
||||
│ │ │ └── pubsub.ts # PubSub for GraphQL subscriptions
|
||||
│ │ ├── i18n/ # Internationalization
|
||||
│ │ ├── mothership/ # Unraid Connect relay communication
|
||||
│ │ ├── store/ # Redux state management
|
||||
│ │ │ ├── actions/
|
||||
│ │ │ ├── listeners/
|
||||
│ │ │ ├── modules/
|
||||
│ │ │ ├── services/
|
||||
│ │ │ ├── state-parsers/
|
||||
│ │ │ ├── watch/
|
||||
│ │ │ └── root-reducer.ts
|
||||
│ │ ├── types/
|
||||
│ │ ├── unraid-api/ # Main API implementation
|
||||
│ │ │ ├── app/
|
||||
│ │ │ ├── auth/ # Authentication system
|
||||
│ │ │ ├── cli/
|
||||
│ │ │ ├── config/
|
||||
│ │ │ ├── cron/
|
||||
│ │ │ ├── decorators/
|
||||
│ │ │ ├── exceptions/
|
||||
│ │ │ ├── graph/ # GraphQL resolvers & services
|
||||
│ │ │ ├── nginx/
|
||||
│ │ │ ├── observers/
|
||||
│ │ │ ├── organizer/
|
||||
│ │ │ ├── plugin/
|
||||
│ │ │ ├── rest/ # REST API endpoints
|
||||
│ │ │ ├── shared/
|
||||
│ │ │ ├── types/
|
||||
│ │ │ ├── unraid-file-modifier/
|
||||
│ │ │ └── utils/
|
||||
│ │ ├── upnp/ # UPnP protocol
|
||||
│ │ ├── cli.ts
|
||||
│ │ ├── consts.ts
|
||||
│ │ ├── environment.ts
|
||||
│ │ └── index.ts
|
||||
│ ├── generated-schema.graphql # Auto-generated GraphQL schema
|
||||
│ ├── codegen.ts # GraphQL code generation config
|
||||
│ ├── Dockerfile
|
||||
│ └── docker-compose.yml
|
||||
├── web/ # Nuxt 3 frontend (Vue 3)
|
||||
│ ├── composables/gql/ # GraphQL composables
|
||||
│ ├── layouts/
|
||||
│ ├── src/
|
||||
│ └── codegen.ts
|
||||
├── unraid-ui/ # Vue 3 component library
|
||||
├── plugin/ # Plugin packaging
|
||||
├── packages/
|
||||
│ ├── unraid-shared/ # Shared types & utilities
|
||||
│ │ └── src/
|
||||
│ │ ├── pubsub/ # PubSub channel definitions
|
||||
│ │ ├── types/
|
||||
│ │ ├── graphql-enums.ts # AuthAction, Resource, Role enums
|
||||
│ │ ├── graphql.model.ts
|
||||
│ │ └── use-permissions.directive.ts
|
||||
│ ├── unraid-api-plugin-connect/
|
||||
│ ├── unraid-api-plugin-generator/
|
||||
│ └── unraid-api-plugin-health/
|
||||
├── scripts/
|
||||
├── pnpm-workspace.yaml
|
||||
├── .nvmrc # Node.js v22
|
||||
└── flake.nix # Nix dev environment
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. Technology Stack
|
||||
|
||||
### Backend
|
||||
| Component | Technology | Version |
|
||||
|-----------|-----------|---------|
|
||||
| Runtime | Node.js | v22 |
|
||||
| Framework | NestJS | 11.1.6 |
|
||||
| HTTP Server | Fastify | 5.5.0 |
|
||||
| GraphQL | Apollo Server | 4.12.2 |
|
||||
| Package Manager | pnpm | 10.15.0 |
|
||||
| Build Tool | Vite | 7.1.3 |
|
||||
| Test Framework | Vitest | 3.2.4 |
|
||||
| Docker SDK | Dockerode | 4.0.7 |
|
||||
| VM Management | @unraid/libvirt | 2.1.0 |
|
||||
| System Info | systeminformation | 5.27.8 |
|
||||
| File Watcher | Chokidar | 4.0.3 |
|
||||
| Auth RBAC | Casbin + nest-authz | 5.38.0 |
|
||||
| Auth Passport | Passport.js | Multiple strategies |
|
||||
| State Mgmt | Redux Toolkit | - |
|
||||
| Subscriptions | graphql-subscriptions | PubSub with EventEmitter |
|
||||
|
||||
### Frontend
|
||||
| Component | Technology | Version |
|
||||
|-----------|-----------|---------|
|
||||
| Framework | Vue 3 + Nuxt | 3.5.20 |
|
||||
| GraphQL Client | Apollo Client | 3.14.0 |
|
||||
| State | Pinia | 3.0.3 |
|
||||
| Styling | Tailwind CSS | v4 |
|
||||
|
||||
### Key Patterns
|
||||
- **Schema-first GraphQL** (migrating to code-first with NestJS decorators)
|
||||
- NestJS dependency injection with decorators
|
||||
- TypeScript ESM imports (`.js` extensions)
|
||||
- Redux for ephemeral runtime state synced with INI config files
|
||||
- Chokidar filesystem watchers for reactive config synchronization
|
||||
|
||||
---
|
||||
|
||||
## 3. GraphQL Schema & Type System
|
||||
|
||||
### Custom Scalars
|
||||
- `DateTime` - ISO date/time
|
||||
- `BigInt` - Large integer values
|
||||
- `JSON` - Arbitrary JSON data
|
||||
- `Port` - Network port numbers
|
||||
- `URL` - URL strings
|
||||
- `PrefixedID` - Server-prefixed identifiers (format: `server-prefix:uuid`)
|
||||
|
||||
### Core Enums
|
||||
|
||||
#### ArrayState
|
||||
```
|
||||
STARTED, STOPPED, NEW_ARRAY, RECON_DISK, DISABLE_DISK,
|
||||
SWAP_DSBL, INVALID_EXPANSION, PARITY_NOT_BIGGEST,
|
||||
TOO_MANY_MISSING_DISKS, NEW_DISK_TOO_SMALL, NO_DATA_DISKS
|
||||
```
|
||||
|
||||
#### ArrayDiskStatus
|
||||
```
|
||||
DISK_NP, DISK_OK, DISK_NP_MISSING, DISK_INVALID, DISK_WRONG,
|
||||
DISK_DSBL, DISK_NP_DSBL, DISK_DSBL_NEW, DISK_NEW
|
||||
```
|
||||
|
||||
#### ArrayDiskType
|
||||
```
|
||||
DATA, PARITY, FLASH, CACHE
|
||||
```
|
||||
|
||||
#### ArrayDiskFsColor
|
||||
```
|
||||
GREEN_ON, GREEN_BLINK, BLUE_ON, BLUE_BLINK,
|
||||
YELLOW_ON, YELLOW_BLINK, RED_ON, RED_OFF, GREY_OFF
|
||||
```
|
||||
|
||||
#### ContainerState
|
||||
```
|
||||
RUNNING, PAUSED, EXITED
|
||||
```
|
||||
|
||||
#### ContainerPortType
|
||||
```
|
||||
TCP, UDP
|
||||
```
|
||||
|
||||
#### VmState
|
||||
```
|
||||
NOSTATE, RUNNING, IDLE, PAUSED, SHUTDOWN,
|
||||
SHUTOFF, CRASHED, PMSUSPENDED
|
||||
```
|
||||
|
||||
#### NotificationImportance / NotificationType
|
||||
- Importance: Defines severity levels
|
||||
- Type: Categorizes notification sources
|
||||
|
||||
#### Role
|
||||
```
|
||||
ADMIN - Full administrative access
|
||||
CONNECT - Read access with remote management
|
||||
GUEST - Basic profile access
|
||||
VIEWER - Read-only access
|
||||
```
|
||||
|
||||
#### AuthAction
|
||||
```
|
||||
CREATE_ANY, CREATE_OWN
|
||||
READ_ANY, READ_OWN
|
||||
UPDATE_ANY, UPDATE_OWN
|
||||
DELETE_ANY, DELETE_OWN
|
||||
```
|
||||
|
||||
#### Resource (35 total)
|
||||
```
|
||||
ACTIVATION_CODE, API_KEY, ARRAY, CLOUD, CONFIG, CONNECT,
|
||||
CUSTOMIZATIONS, DASHBOARD, DISK, DOCKER, FLASH, INFO,
|
||||
LOGS, ME, NETWORK, NOTIFICATIONS, ONLINE, OS, OWNER,
|
||||
PERMISSION, REGISTRATION, SERVERS, SERVICES, SHARE,
|
||||
VARS, VMS, WELCOME, ...
|
||||
```
|
||||
|
||||
### Core Type Definitions
|
||||
|
||||
#### UnraidArray
|
||||
```graphql
|
||||
type UnraidArray {
|
||||
state: ArrayState!
|
||||
capacity: ArrayCapacity
|
||||
boot: ArrayDisk
|
||||
parities: [ArrayDisk!]!
|
||||
parityCheckStatus: ParityCheck
|
||||
disks: [ArrayDisk!]!
|
||||
caches: [ArrayDisk!]!
|
||||
}
|
||||
```
|
||||
|
||||
#### ArrayDisk
|
||||
```graphql
|
||||
type ArrayDisk implements Node {
|
||||
id: PrefixedID!
|
||||
idx: Int
|
||||
name: String
|
||||
device: String
|
||||
size: BigInt
|
||||
fsSize: String
|
||||
fsFree: String
|
||||
fsUsed: String
|
||||
status: ArrayDiskStatus
|
||||
rotational: Boolean
|
||||
temp: Int
|
||||
numReads: BigInt
|
||||
numWrites: BigInt
|
||||
numErrors: BigInt
|
||||
type: ArrayDiskType
|
||||
exportable: Boolean
|
||||
warning: Int
|
||||
critical: Int
|
||||
fsType: String
|
||||
comment: String
|
||||
format: String
|
||||
transport: String
|
||||
color: ArrayDiskFsColor
|
||||
isSpinning: Boolean
|
||||
}
|
||||
```
|
||||
|
||||
#### DockerContainer
|
||||
```graphql
|
||||
type DockerContainer implements Node {
|
||||
id: PrefixedID!
|
||||
names: [String!]
|
||||
image: String
|
||||
imageId: String
|
||||
command: String
|
||||
created: DateTime
|
||||
ports: [ContainerPort!]
|
||||
lanIpPorts: [String] # LAN-accessible host:port values
|
||||
sizeRootFs: BigInt
|
||||
sizeRw: BigInt
|
||||
sizeLog: BigInt
|
||||
labels: JSON
|
||||
state: ContainerState
|
||||
status: String
|
||||
hostConfig: JSON
|
||||
networkSettings: JSON
|
||||
mounts: JSON
|
||||
autoStart: Boolean
|
||||
autoStartOrder: Int
|
||||
autoStartWait: Int
|
||||
templatePath: String
|
||||
projectUrl: String
|
||||
registryUrl: String
|
||||
supportUrl: String
|
||||
iconUrl: String
|
||||
webUiUrl: String
|
||||
shell: String
|
||||
templatePorts: JSON
|
||||
isOrphaned: Boolean
|
||||
}
|
||||
```
|
||||
|
||||
#### VmDomain
|
||||
```graphql
|
||||
type VmDomain implements Node {
|
||||
id: PrefixedID! # UUID-based identifier
|
||||
name: String # Friendly name
|
||||
state: VmState! # Current state
|
||||
uuid: String @deprecated # Use id instead
|
||||
}
|
||||
```
|
||||
|
||||
#### Share
|
||||
```graphql
|
||||
type Share implements Node {
|
||||
id: PrefixedID!
|
||||
name: String
|
||||
comment: String
|
||||
free: String
|
||||
used: String
|
||||
total: String
|
||||
include: [String]
|
||||
exclude: [String]
|
||||
# Additional capacity/config fields
|
||||
}
|
||||
```
|
||||
|
||||
#### Info (System Information)
|
||||
```graphql
|
||||
type Info {
|
||||
time: DateTime
|
||||
baseboard: Baseboard
|
||||
cpu: CpuInfo
|
||||
devices: Devices
|
||||
display: DisplayInfo
|
||||
machineId: String
|
||||
memory: MemoryInfo
|
||||
os: OsInfo
|
||||
system: SystemInfo
|
||||
versions: Versions
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Authentication & Authorization
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
#### 1. API Key Authentication
|
||||
- **Header**: `x-api-key: YOUR_API_KEY`
|
||||
- Keys stored as JSON files in `/boot/config/plugins/unraid-api/`
|
||||
- Generated via WebGUI (Settings > Management Access > API Keys) or CLI (`unraid-api apikey --create`)
|
||||
- 32-byte hexadecimal keys generated using `crypto.randomBytes`
|
||||
- File system watcher (Chokidar) syncs in-memory cache with disk changes
|
||||
- Keys support both **roles** (simplified) and **permissions** (granular resource:action pairs)
|
||||
|
||||
**API Key Service (`api-key.service.ts`):**
|
||||
```typescript
|
||||
// Key creation validates:
|
||||
// - Name via Unicode-aware regex
|
||||
// - At least one role or permission required
|
||||
// - 32-byte hex key generation
|
||||
// - Overwrite support for existing keys
|
||||
|
||||
// Lookup methods:
|
||||
findById(id) // UUID-based lookup
|
||||
findByField(field, value) // Generic field search
|
||||
findByKey(key) // Direct secret key lookup for auth
|
||||
```
|
||||
|
||||
#### 2. Cookie-Based Sessions
|
||||
- CSRF token validation for non-GET requests
|
||||
- `timingSafeEqual` comparison prevents timing attacks
|
||||
- Session user ID: `-1`
|
||||
- Returns admin role privileges
|
||||
|
||||
#### 3. Local Sessions (CLI/System)
|
||||
- For CLI and system-level operations
|
||||
- Local session user ID: `-2`
|
||||
- Returns local admin with elevated privileges
|
||||
|
||||
#### 4. SSO/OIDC
|
||||
- OpenID Connect client implementation
|
||||
- Separate SSO module with auth, client, core, models, session, and utils subdirectories
|
||||
- JWT validation using Jose library
|
||||
|
||||
### Authorization (RBAC via Casbin)
|
||||
|
||||
**Model:** Resource-based access control with `_ANY` (universal) and `_OWN` (owner-limited) permission modifiers.
|
||||
|
||||
```typescript
|
||||
// Permission enforcement via decorators:
|
||||
@UsePermissions({
|
||||
action: AuthAction.READ_ANY,
|
||||
resource: Resource.ARRAY,
|
||||
})
|
||||
```
|
||||
|
||||
**Casbin Implementation (`api/src/unraid-api/auth/casbin/`):**
|
||||
- `casbin.service.ts` - Core RBAC service
|
||||
- `policy.ts` - Policy configuration
|
||||
- `model.ts` - RBAC model definitions
|
||||
- `resolve-subject.util.ts` - Subject resolution utility
|
||||
- Wildcard permission expansion (`*` -> full CRUD)
|
||||
- Role hierarchy with inherited permissions
|
||||
|
||||
### Auth Files Structure
|
||||
```
|
||||
api/src/unraid-api/auth/
|
||||
├── casbin/
|
||||
│ ├── casbin.module.ts
|
||||
│ ├── casbin.service.ts
|
||||
│ ├── model.ts
|
||||
│ ├── policy.ts
|
||||
│ └── resolve-subject.util.ts
|
||||
├── api-key.service.ts # API key CRUD operations
|
||||
├── auth.interceptor.ts # HTTP auth interceptor
|
||||
├── auth.module.ts # NestJS auth module
|
||||
├── auth.service.ts # Core auth logic (3 strategies)
|
||||
├── authentication.guard.ts # Route protection guard
|
||||
├── cookie.service.ts # Cookie handling
|
||||
├── cookie.strategy.ts # Cookie auth strategy
|
||||
├── fastify-throttler.guard.ts # Rate limiting
|
||||
├── header.strategy.ts # Header-based auth (API keys)
|
||||
├── local-session-lifecycle.service.ts
|
||||
├── local-session.service.ts
|
||||
├── local-session.strategy.ts
|
||||
├── public.decorator.ts # Mark endpoints as public
|
||||
└── user.decorator.ts # User injection decorator
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. Resolver Implementations
|
||||
|
||||
### Resolver Directory Structure
|
||||
```
|
||||
api/src/unraid-api/graph/resolvers/
|
||||
├── api-key/ # API key management (10 files)
|
||||
├── array/ # Array operations + parity (11 files)
|
||||
├── cloud/ # Cloud/Connect operations
|
||||
├── config/ # System configuration
|
||||
├── customization/ # UI customization
|
||||
├── disks/ # Disk management (6 files)
|
||||
├── display/ # Display settings
|
||||
├── docker/ # Docker management (36 files)
|
||||
├── flash/ # Flash drive operations
|
||||
├── flash-backup/ # Flash backup management
|
||||
├── info/ # System information (7 subdirs)
|
||||
│ ├── cpu/
|
||||
│ ├── devices/
|
||||
│ ├── display/
|
||||
│ ├── memory/
|
||||
│ ├── os/
|
||||
│ ├── system/
|
||||
│ └── versions/
|
||||
├── logs/ # Log management (8 files)
|
||||
├── metrics/ # System metrics (5 files)
|
||||
├── mutation/ # Root mutation resolver
|
||||
├── notifications/ # Notification management (7 files)
|
||||
├── online/ # Online status
|
||||
├── owner/ # Server owner info
|
||||
├── rclone/ # Cloud storage (8 files)
|
||||
├── registration/ # License/registration
|
||||
├── servers/ # Server management
|
||||
├── settings/ # Settings management (5 files)
|
||||
├── sso/ # SSO/OIDC (8 subdirs)
|
||||
├── ups/ # UPS monitoring (7 files)
|
||||
├── vars/ # Unraid variables
|
||||
└── vms/ # VM management (7 files)
|
||||
```
|
||||
|
||||
### Complete API Surface
|
||||
|
||||
#### Queries
|
||||
|
||||
| Domain | Query | Description | Permission |
|
||||
|--------|-------|-------------|------------|
|
||||
| **Array** | `array` | Get array data (state, capacity, disks, parities, caches) | READ_ANY:ARRAY |
|
||||
| **Disks** | `disks` | List all disks with temp, spin state, capacity | READ_ANY:DISK |
|
||||
| **Disks** | `disk(id)` | Get specific disk by PrefixedID | READ_ANY:DISK |
|
||||
| **Docker** | `docker` | Get Docker instance | READ_ANY:DOCKER |
|
||||
| **Docker** | `container(id)` | Get specific container | READ_ANY:DOCKER |
|
||||
| **Docker** | `containers` | List all containers (optional size info) | READ_ANY:DOCKER |
|
||||
| **Docker** | `logs(id, since, tail)` | Container logs with filtering | READ_ANY:DOCKER |
|
||||
| **Docker** | `networks` | Docker networks | READ_ANY:DOCKER |
|
||||
| **Docker** | `portConflicts` | Port conflict detection | READ_ANY:DOCKER |
|
||||
| **Docker** | `organizer` | Container organization structure | READ_ANY:DOCKER |
|
||||
| **Docker** | `containerUpdateStatuses` | Check update availability | READ_ANY:DOCKER |
|
||||
| **VMs** | `vms` | Get all VM domains | READ_ANY:VMS |
|
||||
| **Info** | `info` | System info (CPU, memory, OS, baseboard, devices, versions) | READ_ANY:INFO |
|
||||
| **Metrics** | `metrics` | System performance metrics | READ_ANY:INFO |
|
||||
| **Logs** | `logFiles` | List available log files | READ_ANY:LOGS |
|
||||
| **Logs** | `logFile(path, lines, startLine)` | Get specific log file content | READ_ANY:LOGS |
|
||||
| **Notifications** | `notifications` | Get all notifications | READ_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `overview` | Notification statistics | READ_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `list` | Filtered notification list | READ_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `warningsAndAlerts` | Deduplicated unread warnings/alerts | READ_ANY:NOTIFICATIONS |
|
||||
| **RClone** | `rclone` | Cloud storage backup settings | READ_ANY:FLASH |
|
||||
| **RClone** | `configForm(formOptions)` | Config form schemas | READ_ANY:FLASH |
|
||||
| **RClone** | `remotes` | List configured remote storage | READ_ANY:FLASH |
|
||||
| **UPS** | `upsDevices` | List UPS devices with status | READ_ANY:* |
|
||||
| **UPS** | `upsDeviceById(id)` | Specific UPS device | READ_ANY:* |
|
||||
| **UPS** | `upsConfiguration` | UPS configuration settings | READ_ANY:* |
|
||||
| **Settings** | `settings` | System settings + SSO config | READ_ANY:CONFIG |
|
||||
| **Shares** | `shares` | Storage shares with capacity | READ_ANY:SHARE |
|
||||
|
||||
#### Mutations
|
||||
|
||||
| Domain | Mutation | Description | Permission |
|
||||
|--------|---------|-------------|------------|
|
||||
| **Array** | `setState(input)` | Set array state (start/stop) | UPDATE_ANY:ARRAY |
|
||||
| **Array** | `addDiskToArray(input)` | Add disk to array | UPDATE_ANY:ARRAY |
|
||||
| **Array** | `removeDiskFromArray(input)` | Remove disk (array must be stopped) | UPDATE_ANY:ARRAY |
|
||||
| **Array** | `mountArrayDisk(id)` | Mount a disk | UPDATE_ANY:ARRAY |
|
||||
| **Array** | `unmountArrayDisk(id)` | Unmount a disk | UPDATE_ANY:ARRAY |
|
||||
| **Array** | `clearArrayDiskStatistics(id)` | Clear disk statistics | UPDATE_ANY:ARRAY |
|
||||
| **Parity** | `start(correct)` | Start parity check | UPDATE_ANY:ARRAY |
|
||||
| **Parity** | `pause` | Pause parity check | UPDATE_ANY:ARRAY |
|
||||
| **Parity** | `resume` | Resume parity check | UPDATE_ANY:ARRAY |
|
||||
| **Parity** | `cancel` | Cancel parity check | UPDATE_ANY:ARRAY |
|
||||
| **Docker** | `start(id)` | Start container | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `stop(id)` | Stop container | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `pause(id)` | Pause container | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `unpause(id)` | Unpause container | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `removeContainer(id, withImage?)` | Remove container (optionally with image) | DELETE_ANY:DOCKER |
|
||||
| **Docker** | `updateContainer(id)` | Update to latest image | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `updateContainers(ids)` | Update multiple containers | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `updateAllContainers` | Update all with available updates | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `updateAutostartConfiguration` | Update auto-start config (feature flag) | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `createDockerFolder` | Create organizational folder | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `setDockerFolderChildren` | Manage folder contents | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `deleteDockerEntries` | Remove folders | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `moveDockerEntriesToFolder` | Reorganize containers | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `moveDockerItemsToPosition` | Position items | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `renameDockerFolder` | Rename folder | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `createDockerFolderWithItems` | Create folder with items | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `syncDockerTemplatePaths` | Sync template data | UPDATE_ANY:DOCKER |
|
||||
| **Docker** | `resetDockerTemplateMappings` | Reset to defaults | UPDATE_ANY:DOCKER |
|
||||
| **VMs** | `start(id)` | Start VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `stop(id)` | Stop VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `pause(id)` | Pause VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `resume(id)` | Resume VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `forceStop(id)` | Force stop VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `reboot(id)` | Reboot VM | UPDATE_ANY:VMS |
|
||||
| **VMs** | `reset(id)` | Reset VM | UPDATE_ANY:VMS |
|
||||
| **Notifications** | `createNotification(input)` | Create notification | CREATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `deleteNotification(id, type)` | Delete notification | DELETE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `deleteArchivedNotifications` | Clear all archived | DELETE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `archiveNotification(id)` | Archive single | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `archiveNotifications(ids)` | Archive multiple | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `archiveAll(importance?)` | Archive all (optional filter) | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `unreadNotification(id)` | Mark as unread | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `unarchiveNotifications(ids)` | Restore archived | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `unarchiveAll(importance?)` | Restore all archived | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `notifyIfUnique(input)` | Create if no equivalent exists | CREATE_ANY:NOTIFICATIONS |
|
||||
| **Notifications** | `recalculateOverview` | Recompute overview stats | UPDATE_ANY:NOTIFICATIONS |
|
||||
| **RClone** | `createRCloneRemote(input)` | Create remote storage | CREATE_ANY:FLASH |
|
||||
| **RClone** | `deleteRCloneRemote(input)` | Delete remote storage | DELETE_ANY:FLASH |
|
||||
| **UPS** | `configureUps(config)` | Update UPS configuration | UPDATE_ANY:* |
|
||||
| **API Keys** | `createApiKey(input)` | Create API key | CREATE_ANY:API_KEY |
|
||||
| **API Keys** | `addRoleForApiKey(input)` | Add role to key | UPDATE_ANY:API_KEY |
|
||||
| **API Keys** | `removeRoleFromApiKey(input)` | Remove role from key | UPDATE_ANY:API_KEY |
|
||||
| **API Keys** | `deleteApiKeys(input)` | Delete API keys | DELETE_ANY:API_KEY |
|
||||
| **API Keys** | `updateApiKey(input)` | Update API key | UPDATE_ANY:API_KEY |
|
||||
|
||||
---
|
||||
|
||||
## 6. Subscription System
|
||||
|
||||
### PubSub Architecture
|
||||
|
||||
The subscription system uses `graphql-subscriptions` PubSub with a Node.js EventEmitter (max 30 listeners).
|
||||
|
||||
**Core PubSub (`api/src/core/pubsub.ts`):**
|
||||
```typescript
|
||||
import EventEmitter from 'events';
|
||||
import { GRAPHQL_PUBSUB_CHANNEL } from '@unraid/shared/pubsub/graphql.pubsub.js';
|
||||
import { PubSub } from 'graphql-subscriptions';
|
||||
|
||||
const eventEmitter = new EventEmitter();
|
||||
eventEmitter.setMaxListeners(30);
|
||||
|
||||
export const pubsub = new PubSub({ eventEmitter });
|
||||
|
||||
export const createSubscription = <T = any>(
|
||||
channel: GRAPHQL_PUBSUB_CHANNEL | string
|
||||
): AsyncIterableIterator<T> => {
|
||||
return pubsub.asyncIterableIterator<T>(channel);
|
||||
};
|
||||
```
|
||||
|
||||
### PubSub Channel Definitions
|
||||
|
||||
**Source:** `packages/unraid-shared/src/pubsub/graphql.pubsub.ts`
|
||||
|
||||
```typescript
|
||||
export const GRAPHQL_PUBSUB_TOKEN = "GRAPHQL_PUBSUB";
|
||||
|
||||
export enum GRAPHQL_PUBSUB_CHANNEL {
|
||||
ARRAY = "ARRAY",
|
||||
CPU_UTILIZATION = "CPU_UTILIZATION",
|
||||
CPU_TELEMETRY = "CPU_TELEMETRY",
|
||||
DASHBOARD = "DASHBOARD",
|
||||
DISPLAY = "DISPLAY",
|
||||
INFO = "INFO",
|
||||
MEMORY_UTILIZATION = "MEMORY_UTILIZATION",
|
||||
NOTIFICATION = "NOTIFICATION",
|
||||
NOTIFICATION_ADDED = "NOTIFICATION_ADDED",
|
||||
NOTIFICATION_OVERVIEW = "NOTIFICATION_OVERVIEW",
|
||||
NOTIFICATION_WARNINGS_AND_ALERTS = "NOTIFICATION_WARNINGS_AND_ALERTS",
|
||||
OWNER = "OWNER",
|
||||
SERVERS = "SERVERS",
|
||||
VMS = "VMS",
|
||||
DOCKER_STATS = "DOCKER_STATS",
|
||||
LOG_FILE = "LOG_FILE",
|
||||
PARITY = "PARITY",
|
||||
}
|
||||
```
|
||||
|
||||
### Available Subscriptions
|
||||
|
||||
| Subscription | Channel | Interval | Description |
|
||||
|-------------|---------|----------|-------------|
|
||||
| `arraySubscription` | ARRAY | Event-based | Real-time array state changes |
|
||||
| `systemMetricsCpu` | CPU_UTILIZATION | 1 second | CPU utilization data |
|
||||
| `systemMetricsCpuTelemetry` | CPU_TELEMETRY | 5 seconds | CPU power & temperature |
|
||||
| `systemMetricsMemory` | MEMORY_UTILIZATION | 2 seconds | Memory utilization |
|
||||
| `dockerContainerStats` | DOCKER_STATS | Polling | Container performance stats |
|
||||
| `logFileSubscription(path)` | LOG_FILE (dynamic) | Event-based | Real-time log file updates |
|
||||
| `notificationAdded` | NOTIFICATION_ADDED | Event-based | New notification created |
|
||||
| `notificationsOverview` | NOTIFICATION_OVERVIEW | Event-based | Overview stats updates |
|
||||
| `notificationsWarningsAndAlerts` | NOTIFICATION_WARNINGS_AND_ALERTS | Event-based | Warning/alert changes |
|
||||
| `upsUpdates` | - | Event-based | UPS device status changes |
|
||||
|
||||
### Subscription Management Services
|
||||
|
||||
Subscription management is organized into three tiers:
|
||||
|
||||
1. **SubscriptionManagerService** (low-level, internal)
|
||||
- Manages both polling and event-based subscriptions
|
||||
- Polling: Creates intervals via NestJS SchedulerRegistry with overlap guards
|
||||
- Event-based: Persistent listeners until explicitly stopped
|
||||
- Methods: `startSubscription()`, `stopSubscription()`, `stopAll()`, `isSubscriptionActive()`
|
||||
|
||||
2. **SubscriptionTrackerService** (mid-level)
|
||||
- Reference-counted subscriptions (auto-cleanup when no subscribers)
|
||||
|
||||
3. **SubscriptionHelperService** (high-level, for resolvers)
|
||||
- GraphQL subscriptions with automatic cleanup
|
||||
- Used directly in resolver decorators
|
||||
|
||||
**Dynamic Topics:** The LOG_FILE channel supports dynamic paths like `LOG_FILE:/var/log/test.log` for monitoring specific log files.
|
||||
|
||||
---
|
||||
|
||||
## 7. State Management
|
||||
|
||||
### Redux Store Architecture
|
||||
|
||||
The API uses Redux Toolkit for ephemeral runtime state derived from persistent INI configuration files stored in `/boot/config/`.
|
||||
|
||||
```
|
||||
api/src/store/
|
||||
├── actions/ # Redux action creators
|
||||
├── listeners/ # State change event listeners
|
||||
├── modules/ # Modular state slices
|
||||
├── services/ # Business logic
|
||||
├── state-parsers/ # INI file parsing utilities
|
||||
├── watch/ # Filesystem watchers (Chokidar)
|
||||
├── index.ts # Store initialization
|
||||
├── root-reducer.ts # Combined reducer
|
||||
└── types.ts # State type definitions
|
||||
```
|
||||
|
||||
**Key Design:** The StateManager singleton uses Chokidar to watch filesystem changes on INI config files, enabling reactive synchronization without polling. This accommodates legacy CLI tools and scripts that modify configuration outside the API.
|
||||
|
||||
---
|
||||
|
||||
## 8. Plugin Architecture
|
||||
|
||||
### Dynamic Plugin System
|
||||
|
||||
The API supports dynamic plugin loading at runtime through NestJS:
|
||||
|
||||
```
|
||||
packages/
|
||||
├── unraid-api-plugin-connect/ # Remote access, UPnP integration
|
||||
├── unraid-api-plugin-generator/ # Code generation
|
||||
├── unraid-api-plugin-health/ # Health monitoring
|
||||
└── unraid-shared/ # Shared types, enums, utilities
|
||||
```
|
||||
|
||||
**Plugin Loading:** Plugins load conditionally based on installation state. The `unraid-api-plugin-connect` handles remote access as an optional peer dependency.
|
||||
|
||||
### Schema Migration Status
|
||||
|
||||
The API is **actively migrating** from schema-first to code-first GraphQL:
|
||||
|
||||
- **Completed:** API Key Resolver (1/22)
|
||||
- **Pending (21 resolvers):** Docker, Array, Disks, VMs, Connect, Display, Info, Owner, Unassigned Devices, Cloud, Flash, Config, Vars, Logs, Users, Notifications, Network, Registration, Servers, Services, Shares
|
||||
|
||||
**Migration pattern per resolver:**
|
||||
1. Create model files with `@ObjectType()` and `@InputType()` decorators
|
||||
2. Define return types and input parameters as classes
|
||||
3. Update resolver to use new model classes
|
||||
4. Create module file for dependency registration
|
||||
5. Test functionality
|
||||
|
||||
---
|
||||
|
||||
## 9. Release History
|
||||
|
||||
### Recent Releases (Reverse Chronological)
|
||||
|
||||
| Version | Date | Highlights |
|
||||
|---------|------|------------|
|
||||
| **v4.29.2** | Dec 19, 2025 | Fix: connect plugin not loaded when connect installed |
|
||||
| **v4.29.1** | Dec 19, 2025 | Reverted docker overview web component; fixed GUID/license race |
|
||||
| **v4.29.0** | Dec 19, 2025 | Feature: Docker overview web component for 7.3+ |
|
||||
| **v4.28.2** | Dec 16, 2025 | API startup timeout for v7.0 and v6.12 |
|
||||
| **v4.28.0** | Dec 15, 2025 | Feature: Plugin cleanup on OS upgrade cancel; keyfile polling; dark mode |
|
||||
| **v4.27.2** | Nov 21, 2025 | Fix: header flashing and trial date display |
|
||||
| **v4.27.0** | Nov 19, 2025 | Feature: Removed API log download; fixed connect plugin uninstall |
|
||||
| **v4.26.0** | Nov 17, 2025 | Feature: CPU power query/subscription; Apollo Studio schema publish |
|
||||
| **v4.25.0** | Sep 26, 2025 | Feature: Tailwind scoping; notification filter pills |
|
||||
| **v4.24.0** | Sep 18, 2025 | Feature: Optimized DOM content loading |
|
||||
| **v4.23.0** | Sep 16, 2025 | Feature: API status manager |
|
||||
|
||||
### Milestone Releases
|
||||
- **Open-sourced:** January 2025
|
||||
- **v4.0.0:** OIDC/SSO support and permissions system
|
||||
- **Native in Unraid 7.2+:** October 29, 2025
|
||||
|
||||
---
|
||||
|
||||
## 10. Roadmap & Upcoming Features
|
||||
|
||||
### Near-Term (Q1–Q2 2025; some items may already be completed)
|
||||
- Developer Tools for Plugins (Q2)
|
||||
- New modernized settings pages (Q2)
|
||||
- Redesigned Unraid Connect configuration (Q1)
|
||||
- Custom theme creation (Q2-Q3)
|
||||
- Storage pool management (Q2)
|
||||
|
||||
### Mid-Term (Q3 2025)
|
||||
- Modern Docker status interface redesign
|
||||
- New plugins interface with redesigned management UI
|
||||
- Streamlined Docker container deployment
|
||||
- Real-time pool health monitoring
|
||||
|
||||
### Under Consideration (TBD)
|
||||
- Docker Compose native support
|
||||
- Advanced plugin configuration/development tools
|
||||
- Storage share creation, settings, and unified management dashboard
|
||||
|
||||
---
|
||||
|
||||
## 11. Open Issues & Community Requests
|
||||
|
||||
### Open Issues: 32 total (selected highlights below)
|
||||
|
||||
#### Feature Requests (Enhancements)
|
||||
| Issue | Title | Description |
|
||||
|-------|-------|-------------|
|
||||
| #1873 | Invoke Mover through API | Programmatic access to the Mover tool |
|
||||
| #1872 | CLI list with creation dates | Timestamp data in CLI operations |
|
||||
| #1871 | Container restart/update mutation | Single operation to restart+update containers |
|
||||
| #1839 | SMART disk data | Detailed disk health monitoring via SMART |
|
||||
| #1827-1828 | Nuxt UI upgrades | Interface modernization |
|
||||
|
||||
#### Reported Bugs
|
||||
| Issue | Title | Impact |
|
||||
|-------|-------|--------|
|
||||
| #1861 | VM suspension issues | Cannot unsuspend PMSUSPENDED VMs |
|
||||
| #1842 | Temperature inconsistency | SSD temps unavailable in Disk queries but accessible via Array |
|
||||
| #1840 | Cache invalidation | Docker container data stale after external changes |
|
||||
| #1837 | GraphQL partial failures | Entire queries fail when VMs/Docker unavailable |
|
||||
| #1859 | Notification counting errors | Archive counts include duplicates |
|
||||
| #1818 | Network query failures | GraphQL returns empty lists for network data |
|
||||
| #1825 | UPS false data | Hardcoded values returned when no UPS connected |
|
||||
|
||||
#### Key Takeaways for unraid-mcp
|
||||
1. **#1837 is critical**: We should handle partial GraphQL failures gracefully
|
||||
2. **#1842**: Temperature data should be queried from Array endpoint, not Disk
|
||||
3. **#1840**: Docker cache may return stale data; consider cache-busting strategies
|
||||
4. **#1825**: UPS data validation needed - API returns fake data with no UPS
|
||||
5. **#1861**: VM `PMSUSPENDED` state needs special handling
|
||||
6. **#1871**: Container restart+update is a common need not yet in the API
|
||||
|
||||
---
|
||||
|
||||
## 12. Community Projects & Integrations
|
||||
|
||||
### 1. Unraid Management Agent (Go)
|
||||
**Repository:** https://github.com/ruaan-deysel/unraid-management-agent
|
||||
**Author:** Ruaan Deysel
|
||||
**Language:** Go
|
||||
|
||||
A comprehensive third-party plugin providing:
|
||||
- **57 REST endpoints** at `http://localhost:8043/api/v1`
|
||||
- **54 MCP tools** for AI agent integration
|
||||
- **41 Prometheus metrics** for monitoring
|
||||
- **WebSocket** real-time event streaming
|
||||
- **MQTT** publishing for IoT integration
|
||||
|
||||
**Architecture:** Event-driven with collectors -> event bus -> API cache pattern
|
||||
- System Collector (15s): CPU, RAM, temperatures
|
||||
- Array/Disk Collectors (30s): Storage metrics
|
||||
- Docker/VM Collectors (30s): Container/VM data
|
||||
- Native Go libraries (Docker SDK, libvirt bindings, /proc/sys access)
|
||||
|
||||
**Key Endpoints:**
|
||||
```
|
||||
/api/v1/health # Health check
|
||||
/api/v1/system # System info
|
||||
/api/v1/array # Array status
|
||||
/api/v1/disks # Disk info
|
||||
/api/v1/docker # Docker containers
|
||||
/api/v1/vm # Virtual machines
|
||||
/api/v1/network # Network interfaces
|
||||
/api/v1/shares # User shares
|
||||
/api/v1/gpu # GPU metrics
|
||||
/api/v1/ups # UPS status
|
||||
/api/v1/settings/* # Disk thresholds, mover config
|
||||
/api/v1/plugins # Plugin info
|
||||
/api/v1/updates # Update status
|
||||
```
|
||||
|
||||
### 2. Home Assistant - domalab/ha-unraid
|
||||
**Repository:** https://github.com/domalab/ha-unraid
|
||||
**Status:** Active (rebuilt in 2025.12.0 for GraphQL)
|
||||
**Requires:** Unraid 7.2.0+, API key
|
||||
|
||||
**Features:**
|
||||
- CPU usage, temperature, power consumption monitoring
|
||||
- Memory utilization tracking
|
||||
- Array state, per-disk and per-share metrics
|
||||
- Docker container start/stop switches
|
||||
- VM management controls
|
||||
- UPS monitoring with energy dashboard integration
|
||||
- Notification counts
|
||||
- Dynamic entity creation (only creates entities for available services)
|
||||
|
||||
**Polling:** System data 30s, storage data 5min
|
||||
|
||||
### 3. Home Assistant - chris-mc1/unraid_api
|
||||
**Repository:** https://github.com/chris-mc1/unraid_api
|
||||
**Status:** Active
|
||||
**Requires:** Unraid 7.2+, API key with Info/Servers/Array/Disk/Share read permissions
|
||||
|
||||
**Features:**
|
||||
- Array status, storage utilization
|
||||
- RAM and CPU usage
|
||||
- Per-share free space (optional)
|
||||
- Per-disk metrics: temperature, spin state, capacity
|
||||
- Python-based (99.9%)
|
||||
|
||||
### 4. Home Assistant - ruaan-deysel/ha-unraid
|
||||
**Repository:** https://github.com/ruaan-deysel/ha-unraid
|
||||
**Status:** Active
|
||||
**Note:** Uses the management agent's REST API rather than official GraphQL
|
||||
|
||||
### 5. Home Assistant - IDmedia/hass-unraid
|
||||
**Repository:** https://github.com/IDmedia/hass-unraid
|
||||
**Approach:** Docker container that parses WebSocket messages and forwards to HA via MQTT
|
||||
|
||||
### 6. unraid-mcp (This Project)
|
||||
**Repository:** https://github.com/jmagar/unraid-mcp
|
||||
**Language:** Python (FastMCP)
|
||||
**Features:** 26 MCP tools, GraphQL client, WebSocket subscriptions
|
||||
|
||||
---
|
||||
|
||||
## 13. Architectural Insights for unraid-mcp
|
||||
|
||||
### Gaps in Our Current Implementation
|
||||
|
||||
Based on this research, potential improvements for unraid-mcp:
|
||||
|
||||
#### Missing Queries We Could Add
|
||||
1. **Metrics subscriptions** - CPU (1s), CPU telemetry (5s), memory (2s) real-time data
|
||||
2. **Docker port conflicts** - `portConflicts` query
|
||||
3. **Docker organizer** - Folder management queries/mutations
|
||||
4. **Docker update statuses** - Check for container image updates
|
||||
5. **Parity check operations** - Start (with correct flag), pause, resume, cancel
|
||||
6. **UPS monitoring** - Devices, configuration, real-time updates subscription
|
||||
7. **API key management** - Full CRUD on API keys
|
||||
8. **Settings management** - System settings queries
|
||||
9. **SSO/OIDC configuration** - SSO settings
|
||||
10. **Disk mount/unmount** - `mountArrayDisk` and `unmountArrayDisk` mutations
|
||||
11. **Container removal** - `removeContainer` with optional image cleanup
|
||||
12. **Container bulk updates** - `updateContainers` and `updateAllContainers`
|
||||
13. **Flash backup** - Flash drive backup operations
|
||||
|
||||
#### GraphQL Query Patterns to Match
|
||||
|
||||
**Official query examples from Unraid docs:**
|
||||
```graphql
|
||||
# System Status
|
||||
query {
|
||||
info {
|
||||
os { platform, distro, release, uptime }
|
||||
cpu { manufacturer, brand, cores, threads }
|
||||
}
|
||||
}
|
||||
|
||||
# Array Monitoring
|
||||
query {
|
||||
array {
|
||||
state
|
||||
capacity { disks { free, used, total } }
|
||||
disks { name, size, status, temp }
|
||||
}
|
||||
}
|
||||
|
||||
# Docker Containers
|
||||
query {
|
||||
dockerContainers {
|
||||
id, names, state, status, autoStart
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Authentication Best Practices
|
||||
- Use `x-api-key` header (not query parameters)
|
||||
- API keys need specific resource:action permissions
|
||||
- For our MCP server, recommend keys with: `READ_ANY` on all resources + `UPDATE_ANY` on DOCKER, VMS, ARRAY for management operations
|
||||
- Keys are stored at `/boot/config/plugins/unraid-api/`
|
||||
|
||||
#### Known Issues to Handle
|
||||
1. **Partial query failures (#1837):** Wrap individual sections in try/catch; don't let VM failures crash Docker queries
|
||||
2. **Temperature inconsistency (#1842):** Prefer Array endpoint for temperature data
|
||||
3. **Docker cache staleness (#1840):** Use cache-busting options when available
|
||||
4. **UPS phantom data (#1825):** Validate UPS data before presenting
|
||||
5. **VM PMSUSPENDED (#1861):** Handle this state explicitly; unsuspend may fail
|
||||
6. **Increased timeouts for disks:** The official API uses 90s read timeouts for disk operations
|
||||
|
||||
#### Subscription Channel Mapping
|
||||
|
||||
Our subscription implementation should align with the official channels:
|
||||
```
|
||||
ARRAY -> array state changes
|
||||
CPU_UTILIZATION -> 1s CPU metrics
|
||||
CPU_TELEMETRY -> 5s CPU power/temp
|
||||
MEMORY_UTILIZATION -> 2s memory metrics
|
||||
DOCKER_STATS -> container stats
|
||||
LOG_FILE + dynamic path -> log file tailing
|
||||
NOTIFICATION_ADDED -> new notifications
|
||||
NOTIFICATION_OVERVIEW -> notification counts
|
||||
NOTIFICATION_WARNINGS_AND_ALERTS -> warnings/alerts
|
||||
PARITY -> parity check progress
|
||||
VMS -> VM state changes
|
||||
```
|
||||
|
||||
#### Performance Considerations
|
||||
- Max 30 concurrent subscription connections (EventEmitter limit)
|
||||
- Disk operations need extended timeouts (90s+)
|
||||
- Docker `sizeRootFs` query is expensive; make it optional
|
||||
- Storage data polling at 5min intervals (not faster) due to SMART query overhead
|
||||
- cache-manager v7 expects TTL in milliseconds (not seconds)
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Key Source File References
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `packages/unraid-shared/src/pubsub/graphql.pubsub.ts` | PubSub channel enum (17 channels) |
|
||||
| `packages/unraid-shared/src/graphql-enums.ts` | AuthAction, Resource (35), Role enums |
|
||||
| `packages/unraid-shared/src/graphql.model.ts` | Shared GraphQL models |
|
||||
| `packages/unraid-shared/src/use-permissions.directive.ts` | Permission enforcement decorator |
|
||||
| `api/src/core/pubsub.ts` | PubSub singleton + subscription factory |
|
||||
| `api/src/unraid-api/auth/auth.service.ts` | 3-strategy auth (API key, cookie, local) |
|
||||
| `api/src/unraid-api/auth/api-key.service.ts` | API key CRUD + validation |
|
||||
| `api/src/unraid-api/auth/casbin/policy.ts` | RBAC policy definitions |
|
||||
| `api/src/unraid-api/graph/resolvers/docker/docker.resolver.ts` | Docker queries + organizer |
|
||||
| `api/src/unraid-api/graph/resolvers/docker/docker.mutations.resolver.ts` | Docker mutations (9 ops) |
|
||||
| `api/src/unraid-api/graph/resolvers/vms/vms.resolver.ts` | VM queries |
|
||||
| `api/src/unraid-api/graph/resolvers/vms/vms.mutations.resolver.ts` | VM mutations (7 ops) |
|
||||
| `api/src/unraid-api/graph/resolvers/array/array.resolver.ts` | Array query + subscription |
|
||||
| `api/src/unraid-api/graph/resolvers/array/array.mutations.resolver.ts` | Array mutations (6 ops) |
|
||||
| `api/src/unraid-api/graph/resolvers/array/parity.mutations.resolver.ts` | Parity mutations (4 ops) |
|
||||
| `api/src/unraid-api/graph/resolvers/notifications/notifications.resolver.ts` | Notification CRUD + subs |
|
||||
| `api/src/unraid-api/graph/resolvers/metrics/metrics.resolver.ts` | System metrics + subs |
|
||||
| `api/src/unraid-api/graph/resolvers/logs/logs.resolver.ts` | Log queries + subscription |
|
||||
| `api/src/unraid-api/graph/resolvers/rclone/rclone.resolver.ts` | RClone queries |
|
||||
| `api/src/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.ts` | RClone mutations |
|
||||
| `api/src/unraid-api/graph/resolvers/ups/ups.resolver.ts` | UPS queries + mutations + sub |
|
||||
| `api/src/unraid-api/graph/resolvers/api-key/api-key.mutation.ts` | API key mutations (5 ops) |
|
||||
| `api/generated-schema.graphql` | Complete auto-generated schema |
|
||||
1
docs/unraid-api-introspection.json
Normal file
1
docs/unraid-api-introspection.json
Normal file
File diff suppressed because one or more lines are too long
723
docs/unraid-api-summary.md
Normal file
723
docs/unraid-api-summary.md
Normal file
@@ -0,0 +1,723 @@
|
||||
# Unraid API v4.29.2 — Introspection Summary
|
||||
|
||||
> Auto-generated from live API introspection on 2026-03-15
|
||||
> Source: tootie (10.1.0.2:31337)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Query Fields](#query-fields)
|
||||
- [Mutation Fields](#mutation-fields)
|
||||
- [Subscription Fields](#subscription-fields)
|
||||
- [Enum Types](#enum-types)
|
||||
- [Input Types](#input-types)
|
||||
- [Object Types](#object-types)
|
||||
|
||||
## Query Fields
|
||||
|
||||
**46 fields**
|
||||
|
||||
| Field | Return Type | Arguments |
|
||||
|-------|-------------|-----------|
|
||||
| `apiKey` | `ApiKey` | `id: PrefixedID!` |
|
||||
| `apiKeyPossiblePermissions` | `[Permission!]!` | — |
|
||||
| `apiKeyPossibleRoles` | `[Role!]!` | — |
|
||||
| `apiKeys` | `[ApiKey!]!` | — |
|
||||
| `array` | `UnraidArray!` | — |
|
||||
| `config` | `Config!` | — |
|
||||
| `customization` | `Customization` | — |
|
||||
| `disk` | `Disk!` | `id: PrefixedID!` |
|
||||
| `disks` | `[Disk!]!` | — |
|
||||
| `docker` | `Docker!` | — |
|
||||
| `flash` | `Flash!` | — |
|
||||
| `getApiKeyCreationFormSchema` | `ApiKeyFormSettings!` | — |
|
||||
| `getAvailableAuthActions` | `[AuthAction!]!` | — |
|
||||
| `getPermissionsForRoles` | `[Permission!]!` | `roles: [Role!]!` |
|
||||
| `info` | `Info!` | — |
|
||||
| `isInitialSetup` | `Boolean!` | — |
|
||||
| `isSSOEnabled` | `Boolean!` | — |
|
||||
| `logFile` | `LogFileContent!` | `path: String!`, `lines: Int`, `startLine: Int` |
|
||||
| `logFiles` | `[LogFile!]!` | — |
|
||||
| `me` | `UserAccount!` | — |
|
||||
| `metrics` | `Metrics!` | — |
|
||||
| `notifications` | `Notifications!` | — |
|
||||
| `oidcConfiguration` | `OidcConfiguration!` | — |
|
||||
| `oidcProvider` | `OidcProvider` | `id: PrefixedID!` |
|
||||
| `oidcProviders` | `[OidcProvider!]!` | — |
|
||||
| `online` | `Boolean!` | — |
|
||||
| `owner` | `Owner!` | — |
|
||||
| `parityHistory` | `[ParityCheck!]!` | — |
|
||||
| `plugins` | `[Plugin!]!` | — |
|
||||
| `previewEffectivePermissions` | `[Permission!]!` | `roles: [Role!]`, `permissions: [AddPermissionInput!]` |
|
||||
| `publicOidcProviders` | `[PublicOidcProvider!]!` | — |
|
||||
| `publicPartnerInfo` | `PublicPartnerInfo` | — |
|
||||
| `publicTheme` | `Theme!` | — |
|
||||
| `rclone` | `RCloneBackupSettings!` | — |
|
||||
| `registration` | `Registration` | — |
|
||||
| `server` | `Server` | — |
|
||||
| `servers` | `[Server!]!` | — |
|
||||
| `services` | `[Service!]!` | — |
|
||||
| `settings` | `Settings!` | — |
|
||||
| `shares` | `[Share!]!` | — |
|
||||
| `upsConfiguration` | `UPSConfiguration!` | — |
|
||||
| `upsDeviceById` | `UPSDevice` | `id: String!` |
|
||||
| `upsDevices` | `[UPSDevice!]!` | — |
|
||||
| `validateOidcSession` | `OidcSessionValidation!` | `token: String!` |
|
||||
| `vars` | `Vars!` | — |
|
||||
| `vms` | `Vms!` | — |
|
||||
|
||||
## Mutation Fields
|
||||
|
||||
**22 fields**
|
||||
|
||||
| Field | Return Type | Arguments |
|
||||
|-------|-------------|-----------|
|
||||
| `addPlugin` | `Boolean!` | `input: PluginManagementInput!` |
|
||||
| `apiKey` | `ApiKeyMutations!` | — |
|
||||
| `archiveAll` | `NotificationOverview!` | `importance: NotificationImportance` |
|
||||
| `archiveNotification` | `Notification!` | `id: PrefixedID!` |
|
||||
| `archiveNotifications` | `NotificationOverview!` | `ids: [PrefixedID!]!` |
|
||||
| `array` | `ArrayMutations!` | — |
|
||||
| `configureUps` | `Boolean!` | `config: UPSConfigInput!` |
|
||||
| `createNotification` | `Notification!` | `input: NotificationData!` |
|
||||
| `customization` | `CustomizationMutations!` | — |
|
||||
| `deleteArchivedNotifications` | `NotificationOverview!` | — |
|
||||
| `deleteNotification` | `NotificationOverview!` | `id: PrefixedID!`, `type: NotificationType!` |
|
||||
| `docker` | `DockerMutations!` | — |
|
||||
| `initiateFlashBackup` | `FlashBackupStatus!` | `input: InitiateFlashBackupInput!` |
|
||||
| `parityCheck` | `ParityCheckMutations!` | — |
|
||||
| `rclone` | `RCloneMutations!` | — |
|
||||
| `recalculateOverview` | `NotificationOverview!` | — |
|
||||
| `removePlugin` | `Boolean!` | `input: PluginManagementInput!` |
|
||||
| `unarchiveAll` | `NotificationOverview!` | `importance: NotificationImportance` |
|
||||
| `unarchiveNotifications` | `NotificationOverview!` | `ids: [PrefixedID!]!` |
|
||||
| `unreadNotification` | `Notification!` | `id: PrefixedID!` |
|
||||
| `updateSettings` | `UpdateSettingsResponse!` | `input: JSON!` |
|
||||
| `vm` | `VmMutations!` | — |
|
||||
|
||||
## Subscription Fields
|
||||
|
||||
**11 fields**
|
||||
|
||||
| Field | Return Type | Arguments |
|
||||
|-------|-------------|-----------|
|
||||
| `arraySubscription` | `UnraidArray!` | — |
|
||||
| `logFile` | `LogFileContent!` | `path: String!` |
|
||||
| `notificationAdded` | `Notification!` | — |
|
||||
| `notificationsOverview` | `NotificationOverview!` | — |
|
||||
| `ownerSubscription` | `Owner!` | — |
|
||||
| `parityHistorySubscription` | `ParityCheck!` | — |
|
||||
| `serversSubscription` | `Server!` | — |
|
||||
| `systemMetricsCpu` | `CpuUtilization!` | — |
|
||||
| `systemMetricsCpuTelemetry` | `CpuPackages!` | — |
|
||||
| `systemMetricsMemory` | `MemoryUtilization!` | — |
|
||||
| `upsUpdates` | `UPSDevice!` | — |
|
||||
|
||||
## Enum Types
|
||||
|
||||
**30 enums**
|
||||
|
||||
### `ArrayDiskFsColor`
|
||||
|
||||
```
|
||||
GREEN_ON
|
||||
GREEN_BLINK
|
||||
BLUE_ON
|
||||
BLUE_BLINK
|
||||
YELLOW_ON
|
||||
YELLOW_BLINK
|
||||
RED_ON
|
||||
RED_OFF
|
||||
GREY_OFF
|
||||
```
|
||||
|
||||
### `ArrayDiskStatus`
|
||||
|
||||
```
|
||||
DISK_NP
|
||||
DISK_OK
|
||||
DISK_NP_MISSING
|
||||
DISK_INVALID
|
||||
DISK_WRONG
|
||||
DISK_DSBL
|
||||
DISK_NP_DSBL
|
||||
DISK_DSBL_NEW
|
||||
DISK_NEW
|
||||
```
|
||||
|
||||
### `ArrayDiskType`
|
||||
|
||||
```
|
||||
DATA
|
||||
PARITY
|
||||
FLASH
|
||||
CACHE
|
||||
```
|
||||
|
||||
### `ArrayState`
|
||||
|
||||
```
|
||||
STARTED
|
||||
STOPPED
|
||||
NEW_ARRAY
|
||||
RECON_DISK
|
||||
DISABLE_DISK
|
||||
SWAP_DSBL
|
||||
INVALID_EXPANSION
|
||||
PARITY_NOT_BIGGEST
|
||||
TOO_MANY_MISSING_DISKS
|
||||
NEW_DISK_TOO_SMALL
|
||||
NO_DATA_DISKS
|
||||
```
|
||||
|
||||
### `ArrayStateInputState`
|
||||
|
||||
```
|
||||
START
|
||||
STOP
|
||||
```
|
||||
|
||||
### `AuthAction`
|
||||
> Authentication actions with possession (e.g., create:any, read:own)
|
||||
|
||||
```
|
||||
CREATE_ANY
|
||||
CREATE_OWN
|
||||
READ_ANY
|
||||
READ_OWN
|
||||
UPDATE_ANY
|
||||
UPDATE_OWN
|
||||
DELETE_ANY
|
||||
DELETE_OWN
|
||||
```
|
||||
|
||||
### `AuthorizationOperator`
|
||||
> Operators for authorization rule matching
|
||||
|
||||
```
|
||||
EQUALS
|
||||
CONTAINS
|
||||
ENDS_WITH
|
||||
STARTS_WITH
|
||||
```
|
||||
|
||||
### `AuthorizationRuleMode`
|
||||
> Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass)
|
||||
|
||||
```
|
||||
OR
|
||||
AND
|
||||
```
|
||||
|
||||
### `ConfigErrorState`
|
||||
> Possible error states for configuration
|
||||
|
||||
```
|
||||
UNKNOWN_ERROR
|
||||
INELIGIBLE
|
||||
INVALID
|
||||
NO_KEY_SERVER
|
||||
WITHDRAWN
|
||||
```
|
||||
|
||||
### `ContainerPortType`
|
||||
|
||||
```
|
||||
TCP
|
||||
UDP
|
||||
```
|
||||
|
||||
### `ContainerState`
|
||||
|
||||
```
|
||||
RUNNING
|
||||
EXITED
|
||||
```
|
||||
|
||||
### `DiskFsType`
|
||||
> The type of filesystem on the disk partition
|
||||
|
||||
```
|
||||
XFS
|
||||
BTRFS
|
||||
VFAT
|
||||
ZFS
|
||||
EXT4
|
||||
NTFS
|
||||
```
|
||||
|
||||
### `DiskInterfaceType`
|
||||
> The type of interface the disk uses to connect to the system
|
||||
|
||||
```
|
||||
SAS
|
||||
SATA
|
||||
USB
|
||||
PCIE
|
||||
UNKNOWN
|
||||
```
|
||||
|
||||
### `DiskSmartStatus`
|
||||
> The SMART (Self-Monitoring, Analysis and Reporting Technology) status of the disk
|
||||
|
||||
```
|
||||
OK
|
||||
UNKNOWN
|
||||
```
|
||||
|
||||
### `NotificationImportance`
|
||||
|
||||
```
|
||||
ALERT
|
||||
INFO
|
||||
WARNING
|
||||
```
|
||||
|
||||
### `NotificationType`
|
||||
|
||||
```
|
||||
UNREAD
|
||||
ARCHIVE
|
||||
```
|
||||
|
||||
### `ParityCheckStatus`
|
||||
|
||||
```
|
||||
NEVER_RUN
|
||||
RUNNING
|
||||
PAUSED
|
||||
COMPLETED
|
||||
CANCELLED
|
||||
FAILED
|
||||
```
|
||||
|
||||
### `RegistrationState`
|
||||
|
||||
```
|
||||
TRIAL
|
||||
BASIC
|
||||
PLUS
|
||||
PRO
|
||||
STARTER
|
||||
UNLEASHED
|
||||
LIFETIME
|
||||
EEXPIRED
|
||||
EGUID
|
||||
EGUID1
|
||||
ETRIAL
|
||||
ENOKEYFILE
|
||||
ENOKEYFILE1
|
||||
ENOKEYFILE2
|
||||
ENOFLASH
|
||||
ENOFLASH1
|
||||
ENOFLASH2
|
||||
ENOFLASH3
|
||||
ENOFLASH4
|
||||
ENOFLASH5
|
||||
ENOFLASH6
|
||||
ENOFLASH7
|
||||
EBLACKLISTED
|
||||
EBLACKLISTED1
|
||||
EBLACKLISTED2
|
||||
ENOCONN
|
||||
```
|
||||
|
||||
### `Resource`
|
||||
> Available resources for permissions
|
||||
|
||||
```
|
||||
ACTIVATION_CODE
|
||||
API_KEY
|
||||
ARRAY
|
||||
CLOUD
|
||||
CONFIG
|
||||
CONNECT
|
||||
CONNECT__REMOTE_ACCESS
|
||||
CUSTOMIZATIONS
|
||||
DASHBOARD
|
||||
DISK
|
||||
DISPLAY
|
||||
DOCKER
|
||||
FLASH
|
||||
INFO
|
||||
LOGS
|
||||
ME
|
||||
NETWORK
|
||||
NOTIFICATIONS
|
||||
ONLINE
|
||||
OS
|
||||
OWNER
|
||||
PERMISSION
|
||||
REGISTRATION
|
||||
SERVERS
|
||||
SERVICES
|
||||
SHARE
|
||||
VARS
|
||||
VMS
|
||||
WELCOME
|
||||
```
|
||||
|
||||
### `Role`
|
||||
> Available roles for API keys and users
|
||||
|
||||
```
|
||||
ADMIN
|
||||
CONNECT
|
||||
GUEST
|
||||
VIEWER
|
||||
```
|
||||
|
||||
### `ServerStatus`
|
||||
|
||||
```
|
||||
ONLINE
|
||||
OFFLINE
|
||||
NEVER_CONNECTED
|
||||
```
|
||||
|
||||
### `Temperature`
|
||||
> Temperature unit
|
||||
|
||||
```
|
||||
CELSIUS
|
||||
FAHRENHEIT
|
||||
```
|
||||
|
||||
### `ThemeName`
|
||||
> The theme name
|
||||
|
||||
```
|
||||
azure
|
||||
black
|
||||
gray
|
||||
white
|
||||
```
|
||||
|
||||
### `UPSCableType`
|
||||
> UPS cable connection types
|
||||
|
||||
```
|
||||
USB
|
||||
SIMPLE
|
||||
SMART
|
||||
ETHER
|
||||
CUSTOM
|
||||
```
|
||||
|
||||
### `UPSKillPower`
|
||||
> Kill UPS power after shutdown option
|
||||
|
||||
```
|
||||
YES
|
||||
NO
|
||||
```
|
||||
|
||||
### `UPSServiceState`
|
||||
> Service state for UPS daemon
|
||||
|
||||
```
|
||||
ENABLE
|
||||
DISABLE
|
||||
```
|
||||
|
||||
### `UPSType`
|
||||
> UPS communication protocols
|
||||
|
||||
```
|
||||
USB
|
||||
APCSMART
|
||||
NET
|
||||
SNMP
|
||||
DUMB
|
||||
PCNET
|
||||
MODBUS
|
||||
```
|
||||
|
||||
### `UpdateStatus`
|
||||
> Update status of a container.
|
||||
|
||||
```
|
||||
UP_TO_DATE
|
||||
UPDATE_AVAILABLE
|
||||
REBUILD_READY
|
||||
UNKNOWN
|
||||
```
|
||||
|
||||
### `VmState`
|
||||
> The state of a virtual machine
|
||||
|
||||
```
|
||||
NOSTATE
|
||||
RUNNING
|
||||
IDLE
|
||||
PAUSED
|
||||
SHUTDOWN
|
||||
SHUTOFF
|
||||
CRASHED
|
||||
PMSUSPENDED
|
||||
```
|
||||
|
||||
### `registrationType`
|
||||
|
||||
```
|
||||
BASIC
|
||||
PLUS
|
||||
PRO
|
||||
STARTER
|
||||
UNLEASHED
|
||||
LIFETIME
|
||||
INVALID
|
||||
TRIAL
|
||||
```
|
||||
|
||||
## Input Types
|
||||
|
||||
**16 input types**
|
||||
|
||||
### `AddPermissionInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `resource` | `Resource!` | — |
|
||||
| `actions` | `[AuthAction!]!` | — |
|
||||
|
||||
### `AddRoleForApiKeyInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `apiKeyId` | `PrefixedID!` | — |
|
||||
| `role` | `Role!` | — |
|
||||
|
||||
### `ArrayDiskInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `id` | `PrefixedID!` | — |
|
||||
| `slot` | `Int` | — |
|
||||
|
||||
### `ArrayStateInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `desiredState` | `ArrayStateInputState!` | — |
|
||||
|
||||
### `CreateApiKeyInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `name` | `String!` | — |
|
||||
| `description` | `String` | — |
|
||||
| `roles` | `[Role!]` | — |
|
||||
| `permissions` | `[AddPermissionInput!]` | — |
|
||||
| `overwrite` | `Boolean` | — |
|
||||
|
||||
### `CreateRCloneRemoteInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `name` | `String!` | — |
|
||||
| `type` | `String!` | — |
|
||||
| `parameters` | `JSON!` | — |
|
||||
|
||||
### `DeleteApiKeyInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `ids` | `[PrefixedID!]!` | — |
|
||||
|
||||
### `DeleteRCloneRemoteInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `name` | `String!` | — |
|
||||
|
||||
### `InitiateFlashBackupInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `remoteName` | `String!` | — |
|
||||
| `sourcePath` | `String!` | — |
|
||||
| `destinationPath` | `String!` | — |
|
||||
| `options` | `JSON` | — |
|
||||
|
||||
### `NotificationData`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `title` | `String!` | — |
|
||||
| `subject` | `String!` | — |
|
||||
| `description` | `String!` | — |
|
||||
| `importance` | `NotificationImportance!` | — |
|
||||
| `link` | `String` | — |
|
||||
|
||||
### `NotificationFilter`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `importance` | `NotificationImportance` | — |
|
||||
| `type` | `NotificationType!` | — |
|
||||
| `offset` | `Int!` | — |
|
||||
| `limit` | `Int!` | — |
|
||||
|
||||
### `PluginManagementInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `names` | `[String!]!` | — |
|
||||
| `bundled` | `Boolean!` | false |
|
||||
| `restart` | `Boolean!` | true |
|
||||
|
||||
### `RCloneConfigFormInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `providerType` | `String` | — |
|
||||
| `showAdvanced` | `Boolean` | false |
|
||||
| `parameters` | `JSON` | — |
|
||||
|
||||
### `RemoveRoleFromApiKeyInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `apiKeyId` | `PrefixedID!` | — |
|
||||
| `role` | `Role!` | — |
|
||||
|
||||
### `UPSConfigInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `service` | `UPSServiceState` | — |
|
||||
| `upsCable` | `UPSCableType` | — |
|
||||
| `customUpsCable` | `String` | — |
|
||||
| `upsType` | `UPSType` | — |
|
||||
| `device` | `String` | — |
|
||||
| `overrideUpsCapacity` | `Int` | — |
|
||||
| `batteryLevel` | `Int` | — |
|
||||
| `minutes` | `Int` | — |
|
||||
| `timeout` | `Int` | — |
|
||||
| `killUps` | `UPSKillPower` | — |
|
||||
|
||||
### `UpdateApiKeyInput`
|
||||
|
||||
| Field | Type | Default |
|
||||
|-------|------|---------|
|
||||
| `id` | `PrefixedID!` | — |
|
||||
| `name` | `String` | — |
|
||||
| `description` | `String` | — |
|
||||
| `roles` | `[Role!]` | — |
|
||||
| `permissions` | `[AddPermissionInput!]` | — |
|
||||
|
||||
## Object Types
|
||||
|
||||
**94 object types**
|
||||
|
||||
| Type | Fields | Description |
|
||||
|------|--------|-------------|
|
||||
| `ActivationCode` | 11 | — |
|
||||
| `ApiConfig` | 5 | — |
|
||||
| `ApiKey` | 7 | — |
|
||||
| `ApiKeyFormSettings` | 4 | — |
|
||||
| `ApiKeyMutations` | 5 | API Key related mutations |
|
||||
| `ArrayCapacity` | 2 | — |
|
||||
| `ArrayDisk` | 24 | — |
|
||||
| `ArrayMutations` | 6 | — |
|
||||
| `Capacity` | 3 | — |
|
||||
| `Config` | 3 | — |
|
||||
| `ContainerHostConfig` | 1 | — |
|
||||
| `ContainerPort` | 4 | — |
|
||||
| `CoreVersions` | 3 | — |
|
||||
| `CpuLoad` | 8 | CPU load for a single core |
|
||||
| `CpuPackages` | 4 | — |
|
||||
| `CpuUtilization` | 3 | — |
|
||||
| `Customization` | 3 | — |
|
||||
| `CustomizationMutations` | 1 | Customization related mutations |
|
||||
| `Disk` | 20 | — |
|
||||
| `DiskPartition` | 3 | — |
|
||||
| `Docker` | 3 | — |
|
||||
| `DockerContainer` | 15 | — |
|
||||
| `DockerMutations` | 2 | — |
|
||||
| `DockerNetwork` | 15 | — |
|
||||
| `ExplicitStatusItem` | 2 | — |
|
||||
| `Flash` | 4 | — |
|
||||
| `FlashBackupStatus` | 2 | — |
|
||||
| `Info` | 11 | — |
|
||||
| `InfoBaseboard` | 8 | — |
|
||||
| `InfoCpu` | 20 | — |
|
||||
| `InfoDevices` | 5 | — |
|
||||
| `InfoDisplay` | 16 | — |
|
||||
| `InfoDisplayCase` | 5 | — |
|
||||
| `InfoGpu` | 7 | — |
|
||||
| `InfoMemory` | 2 | — |
|
||||
| `InfoNetwork` | 8 | — |
|
||||
| `InfoOs` | 15 | — |
|
||||
| `InfoPci` | 9 | — |
|
||||
| `InfoSystem` | 8 | — |
|
||||
| `InfoUsb` | 4 | — |
|
||||
| `InfoVersions` | 3 | — |
|
||||
| `KeyFile` | 2 | — |
|
||||
| `LogFile` | 4 | — |
|
||||
| `LogFileContent` | 4 | — |
|
||||
| `MemoryLayout` | 12 | — |
|
||||
| `MemoryUtilization` | 12 | — |
|
||||
| `Metrics` | 3 | System metrics including CPU and memory utilization |
|
||||
| `Notification` | 9 | — |
|
||||
| `NotificationCounts` | 4 | — |
|
||||
| `NotificationOverview` | 2 | — |
|
||||
| `Notifications` | 3 | — |
|
||||
| `OidcAuthorizationRule` | 3 | — |
|
||||
| `OidcConfiguration` | 2 | — |
|
||||
| `OidcProvider` | 15 | — |
|
||||
| `OidcSessionValidation` | 2 | — |
|
||||
| `OrganizerContainerResource` | 4 | — |
|
||||
| `OrganizerResource` | 4 | — |
|
||||
| `Owner` | 3 | — |
|
||||
| `PackageVersions` | 8 | — |
|
||||
| `ParityCheck` | 9 | — |
|
||||
| `ParityCheckMutations` | 4 | Parity check related mutations, WIP, response types and functionality will change |
|
||||
| `Permission` | 2 | — |
|
||||
| `Plugin` | 4 | — |
|
||||
| `ProfileModel` | 4 | — |
|
||||
| `PublicOidcProvider` | 6 | — |
|
||||
| `PublicPartnerInfo` | 4 | — |
|
||||
| `RCloneBackupConfigForm` | 3 | — |
|
||||
| `RCloneBackupSettings` | 3 | — |
|
||||
| `RCloneDrive` | 2 | — |
|
||||
| `RCloneMutations` | 2 | RClone related mutations |
|
||||
| `RCloneRemote` | 4 | — |
|
||||
| `Registration` | 6 | — |
|
||||
| `ResolvedOrganizerFolder` | 4 | — |
|
||||
| `ResolvedOrganizerV1` | 2 | — |
|
||||
| `ResolvedOrganizerView` | 4 | — |
|
||||
| `Server` | 10 | — |
|
||||
| `Service` | 5 | — |
|
||||
| `Settings` | 4 | — |
|
||||
| `Share` | 16 | — |
|
||||
| `SsoSettings` | 2 | — |
|
||||
| `Theme` | 7 | — |
|
||||
| `UPSBattery` | 3 | — |
|
||||
| `UPSConfiguration` | 14 | — |
|
||||
| `UPSDevice` | 6 | — |
|
||||
| `UPSPower` | 3 | — |
|
||||
| `UnifiedSettings` | 4 | — |
|
||||
| `UnraidArray` | 8 | — |
|
||||
| `UpdateSettingsResponse` | 3 | — |
|
||||
| `Uptime` | 1 | — |
|
||||
| `UserAccount` | 5 | — |
|
||||
| `Vars` | 143 | — |
|
||||
| `VmDomain` | 4 | — |
|
||||
| `VmMutations` | 7 | — |
|
||||
| `Vms` | 3 | — |
|
||||
|
||||
## Schema Statistics
|
||||
|
||||
| Category | Count |
|
||||
|----------|-------|
|
||||
| Query fields | 46 |
|
||||
| Mutation fields | 22 |
|
||||
| Subscription fields | 11 |
|
||||
| Object types | 94 |
|
||||
| Input types | 16 |
|
||||
| Enum types | 30 |
|
||||
| Scalar types | 10 |
|
||||
| Union types | 1 |
|
||||
| Interface types | 2 |
|
||||
| **Total types** | **156** |
|
||||
File diff suppressed because it is too large
Load Diff
@@ -10,7 +10,7 @@ build-backend = "hatchling.build"
|
||||
# ============================================================================
|
||||
[project]
|
||||
name = "unraid-mcp"
|
||||
version = "0.2.0"
|
||||
version = "1.0.1"
|
||||
description = "MCP Server for Unraid API - provides tools to interact with an Unraid server's GraphQL API"
|
||||
readme = "README.md"
|
||||
license = {file = "LICENSE"}
|
||||
@@ -71,13 +71,12 @@ classifiers = [
|
||||
# ============================================================================
|
||||
dependencies = [
|
||||
"python-dotenv>=1.1.1",
|
||||
"fastmcp>=2.14.5",
|
||||
"fastmcp>=3.0.0",
|
||||
"httpx>=0.28.1",
|
||||
"fastapi>=0.115.0",
|
||||
"uvicorn[standard]>=0.35.0",
|
||||
"websockets>=15.0.1",
|
||||
"rich>=14.1.0",
|
||||
"pytz>=2025.2",
|
||||
]
|
||||
|
||||
# ============================================================================
|
||||
@@ -108,8 +107,13 @@ only-include = ["unraid_mcp"]
|
||||
include = [
|
||||
"/unraid_mcp",
|
||||
"/tests",
|
||||
"/commands",
|
||||
"/skills",
|
||||
"/README.md",
|
||||
"/LICENSE",
|
||||
"/CLAUDE.md",
|
||||
"/AGENTS.md",
|
||||
"/GEMINI.md",
|
||||
"/pyproject.toml",
|
||||
"/.env.example",
|
||||
]
|
||||
@@ -121,6 +125,8 @@ exclude = [
|
||||
"/.docs",
|
||||
"/.full-review",
|
||||
"/docs",
|
||||
"/dist",
|
||||
"/logs",
|
||||
"*.pyc",
|
||||
"__pycache__",
|
||||
]
|
||||
@@ -170,6 +176,8 @@ select = [
|
||||
"PERF",
|
||||
# Ruff-specific rules
|
||||
"RUF",
|
||||
# flake8-bandit (security)
|
||||
"S",
|
||||
]
|
||||
ignore = [
|
||||
"E501", # line too long (handled by ruff formatter)
|
||||
@@ -188,7 +196,7 @@ ignore = [
|
||||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"__init__.py" = ["F401", "D104"]
|
||||
"tests/**/*.py" = ["D", "S101", "PLR2004"] # Allow asserts and magic values in tests
|
||||
"tests/**/*.py" = ["D", "S101", "S105", "S106", "S107", "PLR2004", "N815"] # Allow test-only patterns; N815 permits camelCase Pydantic fields that mirror GraphQL response keys
|
||||
|
||||
[tool.ruff.lint.pydocstyle]
|
||||
convention = "google"
|
||||
@@ -285,10 +293,10 @@ dev = [
|
||||
"pytest-asyncio>=1.2.0",
|
||||
"pytest-cov>=7.0.0",
|
||||
"respx>=0.22.0",
|
||||
"types-pytz>=2025.2.0.20250809",
|
||||
"ty>=0.0.15",
|
||||
"ruff>=0.12.8",
|
||||
"build>=1.2.2",
|
||||
"twine>=6.0.1",
|
||||
"graphql-core>=3.2.0",
|
||||
"hypothesis>=6.151.9",
|
||||
]
|
||||
|
||||
@@ -1,210 +1,292 @@
|
||||
---
|
||||
name: unraid
|
||||
description: "Query and monitor Unraid servers via the GraphQL API. Use when the user asks to 'check Unraid', 'monitor Unraid', 'Unraid API', 'get Unraid status', 'check disk temperatures', 'read Unraid logs', 'list Unraid shares', 'Unraid array status', 'Unraid containers', 'Unraid VMs', or mentions Unraid system monitoring, disk health, parity checks, or server status."
|
||||
description: "This skill should be used when the user mentions Unraid, asks to check server health, monitor array or disk status, list or restart Docker containers, start or stop VMs, read system logs, check parity status, view notifications, manage API keys, configure rclone remotes, check UPS or power status, get live CPU or memory data, force stop a VM, check disk temperatures, or perform any operation on an Unraid NAS server. Also use when the user needs to set up or configure Unraid MCP credentials."
|
||||
---
|
||||
|
||||
# Unraid API Skill
|
||||
# Unraid MCP Skill
|
||||
|
||||
**⚠️ MANDATORY SKILL INVOCATION ⚠️**
|
||||
Use the single `unraid` MCP tool with `action` (domain) + `subaction` (operation) for all Unraid operations.
|
||||
|
||||
**YOU MUST invoke this skill (NOT optional) when the user mentions ANY of these triggers:**
|
||||
- "Unraid status", "disk health", "array status"
|
||||
- "Unraid containers", "VMs on Unraid", "Unraid logs"
|
||||
- "check Unraid", "Unraid monitoring", "server health"
|
||||
- Any mention of Unraid servers or system monitoring
|
||||
## Setup
|
||||
|
||||
**Failure to invoke this skill when triggers occur violates your operational requirements.**
|
||||
First time? Run setup to configure credentials:
|
||||
|
||||
Query and monitor Unraid servers using the GraphQL API. Access all 27 read-only endpoints for system monitoring, disk health, logs, containers, VMs, and more.
|
||||
|
||||
## Quick Start
|
||||
|
||||
Set your Unraid server credentials:
|
||||
|
||||
```bash
|
||||
export UNRAID_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key"
|
||||
```
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
**Get API Key:** Settings → Management Access → API Keys → Create (select "Viewer" role)
|
||||
Credentials are stored at `~/.unraid-mcp/.env`. Re-run `setup` any time to update or verify.
|
||||
|
||||
Use the helper script for any query:
|
||||
## Calling Convention
|
||||
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
```
|
||||
unraid(action="<domain>", subaction="<operation>", [additional params])
|
||||
```
|
||||
|
||||
Or run example scripts:
|
||||
|
||||
```bash
|
||||
./scripts/dashboard.sh # Complete multi-server dashboard
|
||||
./examples/disk-health.sh # Disk temperatures & health
|
||||
./examples/read-logs.sh syslog 20 # Read system logs
|
||||
**Examples:**
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### GraphQL API Structure
|
||||
|
||||
Unraid 7.2+ uses GraphQL (not REST). Key differences:
|
||||
- **Single endpoint:** `/graphql` for all queries
|
||||
- **Request exactly what you need:** Specify fields in query
|
||||
- **Strongly typed:** Use introspection to discover fields
|
||||
- **No container logs:** Docker container output logs not accessible
|
||||
|
||||
### Two Resources for Stats
|
||||
|
||||
- **`info`** - Static hardware specs (CPU model, cores, OS version)
|
||||
- **`metrics`** - Real-time usage (CPU %, memory %, current load)
|
||||
|
||||
Always use `metrics` for monitoring, `info` for specifications.
|
||||
|
||||
## Common Tasks
|
||||
|
||||
### System Monitoring
|
||||
|
||||
**Check if server is online:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
```
|
||||
|
||||
**Get CPU and memory usage:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ metrics { cpu { percentTotal } memory { used total percentTotal } } }"
|
||||
```
|
||||
|
||||
**Complete dashboard:**
|
||||
```bash
|
||||
./scripts/dashboard.sh
|
||||
```
|
||||
|
||||
### Disk Management
|
||||
|
||||
**Check disk health and temperatures:**
|
||||
```bash
|
||||
./examples/disk-health.sh
|
||||
```
|
||||
|
||||
**Get array status:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ array { state parityCheckStatus { status progress errors } } }"
|
||||
```
|
||||
|
||||
**List all physical disks (including cache/USB):**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ disks { name } }"
|
||||
```
|
||||
|
||||
### Storage Shares
|
||||
|
||||
**List network shares:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ shares { name comment } }"
|
||||
```
|
||||
|
||||
### Logs
|
||||
|
||||
**List available logs:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ logFiles { name size modifiedAt } }"
|
||||
```
|
||||
|
||||
**Read log content:**
|
||||
```bash
|
||||
./examples/read-logs.sh syslog 20
|
||||
```
|
||||
|
||||
### Containers & VMs
|
||||
|
||||
**List Docker containers:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ docker { containers { names image state status } } }"
|
||||
```
|
||||
|
||||
**List VMs:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ vms { domain { name state } } }"
|
||||
```
|
||||
|
||||
**Note:** Container output logs are NOT accessible via API. Use `docker logs` via SSH.
|
||||
|
||||
### Notifications
|
||||
|
||||
**Get notification counts:**
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ notifications { overview { unread { info warning alert total } } } }"
|
||||
```
|
||||
|
||||
## Helper Script Usage
|
||||
|
||||
The `scripts/unraid-query.sh` helper supports:
|
||||
|
||||
```bash
|
||||
# Basic usage
|
||||
./scripts/unraid-query.sh -u URL -k API_KEY -q "QUERY"
|
||||
|
||||
# Use environment variables
|
||||
export UNRAID_URL="https://unraid.local/graphql"
|
||||
export UNRAID_API_KEY="your-key"
|
||||
./scripts/unraid-query.sh -q "{ online }"
|
||||
|
||||
# Format options
|
||||
-f json # Raw JSON (default)
|
||||
-f pretty # Pretty-printed JSON
|
||||
-f raw # Just the data (no wrapper)
|
||||
```
|
||||
|
||||
## Additional Resources
|
||||
|
||||
### Reference Files
|
||||
|
||||
For detailed documentation, consult:
|
||||
- **`references/endpoints.md`** - Complete list of all 27 API endpoints
|
||||
- **`references/troubleshooting.md`** - Common errors and solutions
|
||||
- **`references/api-reference.md`** - Detailed field documentation
|
||||
|
||||
### Helper Scripts
|
||||
|
||||
- **`scripts/unraid-query.sh`** - Main GraphQL query tool
|
||||
- **`scripts/dashboard.sh`** - Automated multi-server inventory reporter
|
||||
|
||||
## Quick Command Reference
|
||||
|
||||
```bash
|
||||
# System status
|
||||
./scripts/unraid-query.sh -q "{ online metrics { cpu { percentTotal } } }"
|
||||
|
||||
# Disk health
|
||||
./examples/disk-health.sh
|
||||
|
||||
# Array status
|
||||
./scripts/unraid-query.sh -q "{ array { state } }"
|
||||
|
||||
# Read logs
|
||||
./examples/read-logs.sh syslog 20
|
||||
|
||||
# Complete dashboard
|
||||
./scripts/dashboard.sh
|
||||
|
||||
# List shares
|
||||
./scripts/unraid-query.sh -q "{ shares { name } }"
|
||||
|
||||
# List containers
|
||||
./scripts/unraid-query.sh -q "{ docker { containers { names state } } }"
|
||||
unraid(action="system", subaction="overview")
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="health", subaction="check")
|
||||
unraid(action="array", subaction="parity_status")
|
||||
unraid(action="disk", subaction="disks")
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="notification", subaction="overview")
|
||||
unraid(action="live", subaction="cpu")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Agent Tool Usage Requirements
|
||||
## All Domains and Subactions
|
||||
|
||||
**CRITICAL:** When invoking scripts from this skill via the zsh-tool, **ALWAYS use `pty: true`**.
|
||||
### `system` — Server Information
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `overview` | Complete system summary (recommended starting point) |
|
||||
| `server` | Hostname, version, uptime |
|
||||
| `servers` | All known Unraid servers |
|
||||
| `array` | Array status and disk list |
|
||||
| `network` | Network interfaces and config |
|
||||
| `registration` | License and registration status |
|
||||
| `variables` | Environment variables |
|
||||
| `metrics` | Real-time CPU, memory, I/O usage |
|
||||
| `services` | Running services status |
|
||||
| `display` | Display settings |
|
||||
| `config` | System configuration |
|
||||
| `online` | Quick online status check |
|
||||
| `owner` | Server owner information |
|
||||
| `settings` | User settings and preferences |
|
||||
| `flash` | USB flash drive details |
|
||||
| `ups_devices` | List all UPS devices |
|
||||
| `ups_device` | Single UPS device (requires `device_id`) |
|
||||
| `ups_config` | UPS configuration |
|
||||
|
||||
Without PTY mode, command output will not be visible even though commands execute successfully.
|
||||
### `health` — Diagnostics
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `check` | Comprehensive health check — connectivity, array, disks, containers, VMs, resources |
|
||||
| `test_connection` | Test API connectivity and authentication |
|
||||
| `diagnose` | Detailed diagnostic report with troubleshooting recommendations |
|
||||
| `setup` | Configure credentials interactively (stores to `~/.unraid-mcp/.env`) |
|
||||
|
||||
**Correct invocation pattern:**
|
||||
```typescript
|
||||
<invoke name="mcp__plugin_zsh-tool_zsh-tool__zsh">
|
||||
<parameter name="command">./skills/SKILL_NAME/scripts/SCRIPT.sh [args]</parameter>
|
||||
<parameter name="pty">true</parameter>
|
||||
</invoke>
|
||||
### `array` — Array & Parity
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `parity_status` | Current parity check progress and status |
|
||||
| `parity_history` | Historical parity check results |
|
||||
| `parity_start` | Start a parity check |
|
||||
| `parity_pause` | Pause a running parity check |
|
||||
| `parity_resume` | Resume a paused parity check |
|
||||
| `parity_cancel` | Cancel a running parity check |
|
||||
| `start_array` | Start the array |
|
||||
| `stop_array` | ⚠️ Stop the array (requires `confirm=True`) |
|
||||
| `add_disk` | Add a disk to the array (requires `slot`, `id`) |
|
||||
| `remove_disk` | ⚠️ Remove a disk (requires `slot`, `confirm=True`) |
|
||||
| `mount_disk` | Mount a disk |
|
||||
| `unmount_disk` | Unmount a disk |
|
||||
| `clear_disk_stats` | ⚠️ Clear disk statistics (requires `confirm=True`) |
|
||||
|
||||
### `disk` — Storage & Logs
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `shares` | List network shares |
|
||||
| `disks` | All physical disks with health and temperatures |
|
||||
| `disk_details` | Detailed info for a specific disk (requires `disk_id`) |
|
||||
| `log_files` | List available log files |
|
||||
| `logs` | Read log content (requires `path`; optional `lines`) |
|
||||
| `flash_backup` | ⚠️ Trigger a flash backup (requires `confirm=True`) |
|
||||
|
||||
### `docker` — Containers
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All containers with status, image, state |
|
||||
| `details` | Single container details (requires container identifier) |
|
||||
| `start` | Start a container (requires container identifier) |
|
||||
| `stop` | Stop a container (requires container identifier) |
|
||||
| `restart` | Restart a container (requires container identifier) |
|
||||
| `networks` | List Docker networks |
|
||||
| `network_details` | Details for a specific network (requires `network_id`) |
|
||||
|
||||
**Container Identification:** Name, ID, or partial name (fuzzy match supported).
|
||||
|
||||
### `vm` — Virtual Machines
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All VMs with state |
|
||||
| `details` | Single VM details (requires `vm_id`) |
|
||||
| `start` | Start a VM (requires `vm_id`) |
|
||||
| `stop` | Gracefully stop a VM (requires `vm_id`) |
|
||||
| `pause` | Pause a VM (requires `vm_id`) |
|
||||
| `resume` | Resume a paused VM (requires `vm_id`) |
|
||||
| `reboot` | Reboot a VM (requires `vm_id`) |
|
||||
| `force_stop` | ⚠️ Force stop a VM (requires `vm_id`, `confirm=True`) |
|
||||
| `reset` | ⚠️ Hard reset a VM (requires `vm_id`, `confirm=True`) |
|
||||
|
||||
### `notification` — Notifications
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `overview` | Notification counts (unread, archived by type) |
|
||||
| `list` | List notifications (optional `filter`, `limit`, `offset`) |
|
||||
| `mark_unread` | Mark a notification as unread (requires `notification_id`) |
|
||||
| `create` | Create a notification (requires `title`, `subject`, `description`, `importance`) |
|
||||
| `archive` | Archive a notification (requires `notification_id`) |
|
||||
| `delete` | ⚠️ Delete a notification (requires `notification_id`, `notification_type`, `confirm=True`) |
|
||||
| `delete_archived` | ⚠️ Delete all archived (requires `confirm=True`) |
|
||||
| `archive_all` | Archive all unread notifications |
|
||||
| `archive_many` | Archive multiple (requires `ids` list) |
|
||||
| `unarchive_many` | Unarchive multiple (requires `ids` list) |
|
||||
| `unarchive_all` | Unarchive all archived notifications |
|
||||
| `recalculate` | Recalculate notification counts |
|
||||
|
||||
### `key` — API Keys
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All API keys |
|
||||
| `get` | Single key details (requires `key_id`) |
|
||||
| `create` | Create a new key (requires `name`, `roles`) |
|
||||
| `update` | Update a key (requires `key_id`) |
|
||||
| `delete` | ⚠️ Delete a key (requires `key_id`, `confirm=True`) |
|
||||
| `add_role` | Add a role to a key (requires `key_id`, `role`) |
|
||||
| `remove_role` | Remove a role from a key (requires `key_id`, `role`) |
|
||||
|
||||
### `plugin` — Plugins
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list` | All installed plugins |
|
||||
| `add` | Install plugins (requires `names` — list of plugin names) |
|
||||
| `remove` | ⚠️ Uninstall plugins (requires `names` — list of plugin names, `confirm=True`) |
|
||||
|
||||
### `rclone` — Cloud Storage
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `list_remotes` | List configured rclone remotes |
|
||||
| `config_form` | Get configuration form for a remote type |
|
||||
| `create_remote` | Create a new remote (requires `name`, `type`, `fields`) |
|
||||
| `delete_remote` | ⚠️ Delete a remote (requires `name`, `confirm=True`) |
|
||||
|
||||
### `setting` — System Settings
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `update` | Update system settings (requires `settings` object) |
|
||||
| `configure_ups` | ⚠️ Configure UPS settings (requires `confirm=True`) |
|
||||
|
||||
### `customization` — Theme & Appearance
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `theme` | Current theme settings |
|
||||
| `public_theme` | Public-facing theme |
|
||||
| `is_initial_setup` | Check if initial setup is complete |
|
||||
| `sso_enabled` | Check SSO status |
|
||||
| `set_theme` | Update theme (requires theme parameters) |
|
||||
|
||||
### `oidc` — SSO / OpenID Connect
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `providers` | List configured OIDC providers |
|
||||
| `provider` | Single provider details (requires `provider_id`) |
|
||||
| `configuration` | OIDC configuration |
|
||||
| `public_providers` | Public-facing provider list |
|
||||
| `validate_session` | Validate current SSO session |
|
||||
|
||||
### `user` — Current User
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `me` | Current authenticated user info |
|
||||
|
||||
### `live` — Real-Time Subscriptions
|
||||
These use persistent WebSocket connections. The first call returns a "connecting" placeholder — retry after a moment for live data.
|
||||
|
||||
| Subaction | Description |
|
||||
|-----------|-------------|
|
||||
| `cpu` | Live CPU utilization |
|
||||
| `memory` | Live memory usage |
|
||||
| `cpu_telemetry` | Detailed CPU telemetry |
|
||||
| `array_state` | Live array state changes |
|
||||
| `parity_progress` | Live parity check progress |
|
||||
| `ups_status` | Live UPS status |
|
||||
| `notifications_overview` | Live notification counts |
|
||||
| `owner` | Live owner info |
|
||||
| `server_status` | Live server status |
|
||||
| `log_tail` | Live log tail stream |
|
||||
| `notification_feed` | Live notification feed |
|
||||
|
||||
---
|
||||
|
||||
## Destructive Actions
|
||||
|
||||
All require `confirm=True` as an explicit parameter. Without it, the action is blocked and elicitation is triggered.
|
||||
|
||||
| Domain | Subaction | Risk |
|
||||
|--------|-----------|------|
|
||||
| `array` | `stop_array` | Stops array while containers/VMs may use shares |
|
||||
| `array` | `remove_disk` | Removes disk from array |
|
||||
| `array` | `clear_disk_stats` | Clears disk statistics permanently |
|
||||
| `vm` | `force_stop` | Hard kills VM without graceful shutdown |
|
||||
| `vm` | `reset` | Hard resets VM |
|
||||
| `notification` | `delete` | Permanently deletes a notification |
|
||||
| `notification` | `delete_archived` | Permanently deletes all archived notifications |
|
||||
| `rclone` | `delete_remote` | Removes a cloud storage remote |
|
||||
| `key` | `delete` | Permanently deletes an API key |
|
||||
| `disk` | `flash_backup` | Triggers flash backup operation |
|
||||
| `setting` | `configure_ups` | Modifies UPS configuration |
|
||||
| `plugin` | `remove` | Uninstalls a plugin |
|
||||
|
||||
---
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### First-time setup
|
||||
```
|
||||
unraid(action="health", subaction="setup")
|
||||
unraid(action="health", subaction="check")
|
||||
```
|
||||
|
||||
### System health overview
|
||||
```
|
||||
unraid(action="system", subaction="overview")
|
||||
unraid(action="health", subaction="check")
|
||||
```
|
||||
|
||||
### Container management
|
||||
```
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="docker", subaction="details", container_id="plex")
|
||||
unraid(action="docker", subaction="restart", container_id="sonarr")
|
||||
```
|
||||
|
||||
### Array and disk status
|
||||
```
|
||||
unraid(action="array", subaction="parity_status")
|
||||
unraid(action="disk", subaction="disks")
|
||||
unraid(action="system", subaction="array")
|
||||
```
|
||||
|
||||
### Read logs
|
||||
```
|
||||
unraid(action="disk", subaction="log_files")
|
||||
unraid(action="disk", subaction="logs", path="syslog", lines=50)
|
||||
```
|
||||
|
||||
### Live monitoring
|
||||
```
|
||||
unraid(action="live", subaction="cpu")
|
||||
unraid(action="live", subaction="memory")
|
||||
unraid(action="live", subaction="array_state")
|
||||
```
|
||||
|
||||
### VM operations
|
||||
```
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="vm", subaction="start", vm_id="<id>")
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- **Rate limit:** 100 requests / 10 seconds
|
||||
- **Log path validation:** Only `/var/log/`, `/boot/logs/`, `/mnt/` prefixes accepted
|
||||
- **Container logs:** Docker container stdout/stderr are NOT accessible via API — use SSH + `docker logs`
|
||||
- **`arraySubscription`:** Known Unraid API bug — `live/array_state` may show "connecting" indefinitely
|
||||
- **Event-driven subs** (`notifications_overview`, `owner`, `server_status`, `ups_status`): Only populate cache on first real server event
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# Unraid API - Complete Reference Guide
|
||||
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents the raw GraphQL API schema for development and maintenance purposes (adding new queries/mutations). Do NOT use these curl/GraphQL examples for MCP tool usage. Use `unraid(action=..., subaction=...)` calls instead. See `SKILL.md` for the correct calling convention.
|
||||
|
||||
**Tested on:** Unraid 7.2 x86_64
|
||||
**Date:** 2026-01-21
|
||||
**API Type:** GraphQL
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — This file documents raw GraphQL endpoints for development purposes. For MCP tool usage, use `unraid(action=..., subaction=...)` calls as documented in `SKILL.md`.
|
||||
|
||||
# Unraid API Endpoints Reference
|
||||
|
||||
Complete list of available GraphQL read-only endpoints in Unraid 7.2+.
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
> **⚠️ DEVELOPER REFERENCE ONLY** — Full GraphQL SDL from live API introspection. Use this to verify field names and types when adding new queries/mutations to the MCP server. Not for runtime agent usage.
|
||||
|
||||
"""
|
||||
Indicates exactly one field must be supplied and this field must not be `null`.
|
||||
"""
|
||||
|
||||
@@ -1,219 +1,126 @@
|
||||
# Unraid API Quick Reference
|
||||
# Unraid MCP — Quick Reference
|
||||
|
||||
Quick reference for the most common Unraid GraphQL API queries.
|
||||
All operations use: `unraid(action="<domain>", subaction="<operation>", [params])`
|
||||
|
||||
## Setup
|
||||
## Most Common Operations
|
||||
|
||||
```bash
|
||||
# Set environment variables
|
||||
export UNRAID_URL="https://your-unraid-server/graphql"
|
||||
export UNRAID_API_KEY="your-api-key-here"
|
||||
### Health & Status
|
||||
|
||||
# Or use the helper script directly
|
||||
./scripts/unraid-query.sh -u "$UNRAID_URL" -k "$UNRAID_API_KEY" -q "{ online }"
|
||||
```python
|
||||
unraid(action="health", subaction="setup") # First-time credential setup
|
||||
unraid(action="health", subaction="check") # Full health check
|
||||
unraid(action="health", subaction="test_connection") # Quick connectivity test
|
||||
unraid(action="system", subaction="overview") # Complete server summary
|
||||
unraid(action="system", subaction="metrics") # CPU / RAM / I/O usage
|
||||
unraid(action="system", subaction="online") # Online status
|
||||
```
|
||||
|
||||
## Common Queries
|
||||
### Array & Disks
|
||||
|
||||
### System Status
|
||||
```graphql
|
||||
{
|
||||
online
|
||||
metrics {
|
||||
cpu { percentTotal }
|
||||
memory { total used free percentTotal }
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="system", subaction="array") # Array status overview
|
||||
unraid(action="disk", subaction="disks") # All disks with temps & health
|
||||
unraid(action="array", subaction="parity_status") # Current parity check
|
||||
unraid(action="array", subaction="parity_history") # Past parity results
|
||||
unraid(action="array", subaction="parity_start") # Start parity check
|
||||
unraid(action="array", subaction="stop_array", confirm=True) # ⚠️ Stop array
|
||||
```
|
||||
|
||||
### Array Status
|
||||
```graphql
|
||||
{
|
||||
array {
|
||||
state
|
||||
parityCheckStatus { status progress errors }
|
||||
}
|
||||
}
|
||||
```
|
||||
### Logs
|
||||
|
||||
### Disk List with Temperatures
|
||||
```graphql
|
||||
{
|
||||
array {
|
||||
disks {
|
||||
name
|
||||
device
|
||||
temp
|
||||
status
|
||||
fsSize
|
||||
fsFree
|
||||
isSpinning
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### All Physical Disks (including USB/SSDs)
|
||||
```graphql
|
||||
{
|
||||
disks {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Network Shares
|
||||
```graphql
|
||||
{
|
||||
shares {
|
||||
name
|
||||
comment
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="disk", subaction="log_files") # List available logs
|
||||
unraid(action="disk", subaction="logs", log_path="syslog", tail_lines=50) # Read syslog
|
||||
unraid(action="disk", subaction="logs", log_path="/var/log/syslog") # Full path also works
|
||||
unraid(action="live", subaction="log_tail", log_path="/var/log/syslog") # Live tail
|
||||
```
|
||||
|
||||
### Docker Containers
|
||||
```graphql
|
||||
{
|
||||
docker {
|
||||
containers {
|
||||
id
|
||||
names
|
||||
image
|
||||
state
|
||||
status
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```python
|
||||
unraid(action="docker", subaction="list")
|
||||
unraid(action="docker", subaction="details", container_id="plex")
|
||||
unraid(action="docker", subaction="start", container_id="nginx")
|
||||
unraid(action="docker", subaction="stop", container_id="nginx")
|
||||
unraid(action="docker", subaction="restart", container_id="sonarr")
|
||||
unraid(action="docker", subaction="networks")
|
||||
```
|
||||
|
||||
### Virtual Machines
|
||||
```graphql
|
||||
{
|
||||
vms {
|
||||
id
|
||||
name
|
||||
state
|
||||
cpus
|
||||
memory
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### List Log Files
|
||||
```graphql
|
||||
{
|
||||
logFiles {
|
||||
name
|
||||
size
|
||||
modifiedAt
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Read Log Content
|
||||
```graphql
|
||||
{
|
||||
logFile(path: "syslog", lines: 20) {
|
||||
content
|
||||
totalLines
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### System Info
|
||||
```graphql
|
||||
{
|
||||
info {
|
||||
time
|
||||
cpu { model cores threads }
|
||||
os { distro release }
|
||||
system { manufacturer model }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### UPS Devices
|
||||
```graphql
|
||||
{
|
||||
upsDevices {
|
||||
id
|
||||
name
|
||||
status
|
||||
charge
|
||||
load
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="vm", subaction="list")
|
||||
unraid(action="vm", subaction="details", vm_id="<id>")
|
||||
unraid(action="vm", subaction="start", vm_id="<id>")
|
||||
unraid(action="vm", subaction="stop", vm_id="<id>")
|
||||
unraid(action="vm", subaction="reboot", vm_id="<id>")
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
### Notifications
|
||||
|
||||
**Counts:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
overview {
|
||||
unread { info warning alert total }
|
||||
archive { info warning alert total }
|
||||
}
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="notification", subaction="overview")
|
||||
unraid(action="notification", subaction="list", list_type="UNREAD", limit=10)
|
||||
unraid(action="notification", subaction="archive", notification_id="<id>")
|
||||
unraid(action="notification", subaction="create", title="Test", subject="Subject",
|
||||
description="Body", importance="normal")
|
||||
```
|
||||
|
||||
**List Unread:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
list(filter: { type: UNREAD, offset: 0, limit: 10 }) {
|
||||
id
|
||||
subject
|
||||
description
|
||||
timestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
### API Keys
|
||||
|
||||
```python
|
||||
unraid(action="key", subaction="list")
|
||||
unraid(action="key", subaction="create", name="my-key", roles=["viewer"])
|
||||
unraid(action="key", subaction="delete", key_id="<id>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
**List Archived:**
|
||||
```graphql
|
||||
{
|
||||
notifications {
|
||||
list(filter: { type: ARCHIVE, offset: 0, limit: 10 }) {
|
||||
id
|
||||
subject
|
||||
description
|
||||
timestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
### Plugins
|
||||
|
||||
```python
|
||||
unraid(action="plugin", subaction="list")
|
||||
unraid(action="plugin", subaction="add", names=["community.applications"])
|
||||
unraid(action="plugin", subaction="remove", names=["old.plugin"], confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
## Field Name Notes
|
||||
### rclone
|
||||
|
||||
- Use `metrics` for real-time usage (CPU/memory percentages)
|
||||
- Use `info` for hardware specs (cores, model, etc.)
|
||||
- Temperature field is `temp` (not `temperature`)
|
||||
- Status field is `state` for array (not `status`)
|
||||
- Sizes are in kilobytes
|
||||
- Temperatures are in Celsius
|
||||
|
||||
## Response Structure
|
||||
|
||||
All responses follow this pattern:
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"queryName": { ... }
|
||||
}
|
||||
}
|
||||
```python
|
||||
unraid(action="rclone", subaction="list_remotes")
|
||||
unraid(action="rclone", subaction="delete_remote", name="<remote>", confirm=True) # ⚠️
|
||||
```
|
||||
|
||||
Errors appear in:
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
{ "message": "..." }
|
||||
]
|
||||
}
|
||||
### Live Subscriptions (real-time)
|
||||
|
||||
```python
|
||||
unraid(action="live", subaction="cpu")
|
||||
unraid(action="live", subaction="memory")
|
||||
unraid(action="live", subaction="parity_progress")
|
||||
unraid(action="live", subaction="log_tail")
|
||||
unraid(action="live", subaction="notification_feed")
|
||||
unraid(action="live", subaction="ups_status")
|
||||
```
|
||||
|
||||
> Returns `{"status": "connecting"}` on the first call — retry after a moment.
|
||||
|
||||
---
|
||||
|
||||
## Domain → action= Mapping
|
||||
|
||||
| Old tool name (pre-v1.0) | New `action=` |
|
||||
|--------------------------|---------------|
|
||||
| `unraid_info` | `system` |
|
||||
| `unraid_health` | `health` |
|
||||
| `unraid_array` | `array` |
|
||||
| `unraid_storage` | `disk` |
|
||||
| `unraid_docker` | `docker` |
|
||||
| `unraid_vm` | `vm` |
|
||||
| `unraid_notifications` | `notification` |
|
||||
| `unraid_keys` | `key` |
|
||||
| `unraid_plugins` | `plugin` |
|
||||
| `unraid_rclone` | `rclone` |
|
||||
| `unraid_settings` | `setting` |
|
||||
| `unraid_customization` | `customization` |
|
||||
| `unraid_oidc` | `oidc` |
|
||||
| `unraid_users` | `user` |
|
||||
| `unraid_live` | `live` |
|
||||
|
||||
@@ -1,36 +1,109 @@
|
||||
# Unraid API Troubleshooting Guide
|
||||
# Unraid MCP — Troubleshooting Guide
|
||||
|
||||
Common issues and solutions when working with the Unraid GraphQL API.
|
||||
## Credentials Not Configured
|
||||
|
||||
## "Cannot query field" error
|
||||
**Error:** `CredentialsNotConfiguredError` or message containing `~/.unraid-mcp/.env`
|
||||
|
||||
Field name doesn't exist in your Unraid version. Use introspection to find valid fields:
|
||||
**Fix:** Run setup to configure credentials interactively:
|
||||
|
||||
```bash
|
||||
./scripts/unraid-query.sh -q "{ __type(name: \"TypeName\") { fields { name } } }"
|
||||
```python
|
||||
unraid(action="health", subaction="setup")
|
||||
```
|
||||
|
||||
## "API key validation failed"
|
||||
- Check API key is correct and not truncated
|
||||
- Verify key has appropriate permissions (use "Viewer" role)
|
||||
- Ensure URL includes `/graphql` endpoint (e.g. `http://host/graphql`)
|
||||
This writes `UNRAID_API_URL` and `UNRAID_API_KEY` to `~/.unraid-mcp/.env`. Re-run at any time to update or rotate credentials.
|
||||
|
||||
## Empty results
|
||||
Many queries return empty arrays when no data exists:
|
||||
- `docker.containers` - No containers running
|
||||
- `vms` - No VMs configured (or VM service disabled)
|
||||
- `notifications` - No active alerts
|
||||
- `plugins` - No plugins installed
|
||||
---
|
||||
|
||||
This is normal behavior, not an error. Ensure your scripts handle empty arrays gracefully.
|
||||
## Connection Failed / API Unreachable
|
||||
|
||||
## "VMs are not available" (GraphQL Error)
|
||||
If the VM manager is disabled in Unraid settings, querying `{ vms { ... } }` will return a GraphQL error.
|
||||
**Solution:** Check if VM service is enabled before querying, or use error handling (like `IGNORE_ERRORS=true` in dashboard scripts) to process partial data.
|
||||
**Symptoms:** Timeout, connection refused, network error
|
||||
|
||||
## URL connection issues
|
||||
- Use HTTPS (not HTTP) for remote access if configured
|
||||
- For local access: `http://unraid-server-ip/graphql`
|
||||
- For Unraid Connect: Use provided URL with token in hostname
|
||||
- Use `-k` (insecure) with curl if using self-signed certs on local HTTPS
|
||||
- Use `-L` (follow redirects) if Unraid redirects HTTP to HTTPS
|
||||
**Diagnostic steps:**
|
||||
|
||||
1. Test basic connectivity:
|
||||
|
||||
```python
|
||||
unraid(action="health", subaction="test_connection")
|
||||
```
|
||||
|
||||
2. Full diagnostic report:
|
||||
|
||||
```python
|
||||
unraid(action="health", subaction="diagnose")
|
||||
```
|
||||
|
||||
3. Check that `UNRAID_API_URL` in `~/.unraid-mcp/.env` points to the correct Unraid GraphQL endpoint.
|
||||
|
||||
4. Verify the API key has the required roles. Get a new key: **Unraid UI → Settings → Management Access → API Keys → Create** (select "Viewer" role for read-only, or appropriate roles for mutations).
|
||||
|
||||
---
|
||||
|
||||
## Invalid Action / Subaction
|
||||
|
||||
**Error:** `Invalid action 'X'` or `Invalid subaction 'X' for action 'Y'`
|
||||
|
||||
**Fix:** Check the domain table in `SKILL.md` for the exact `action=` and `subaction=` strings. Common mistakes:
|
||||
|
||||
| Wrong | Correct |
|
||||
|-------|---------|
|
||||
| `action="info"` | `action="system"` |
|
||||
| `action="notifications"` | `action="notification"` |
|
||||
| `action="keys"` | `action="key"` |
|
||||
| `action="plugins"` | `action="plugin"` |
|
||||
| `action="settings"` | `action="setting"` |
|
||||
| `subaction="unread"` | `subaction="mark_unread"` |
|
||||
|
||||
---
|
||||
|
||||
## Destructive Action Blocked
|
||||
|
||||
**Error:** `Action 'X' was not confirmed. Re-run with confirm=True to bypass elicitation.`
|
||||
|
||||
**Fix:** Add `confirm=True` to the call:
|
||||
|
||||
```python
|
||||
unraid(action="array", subaction="stop_array", confirm=True)
|
||||
unraid(action="vm", subaction="force_stop", vm_id="<id>", confirm=True)
|
||||
```
|
||||
|
||||
See the Destructive Actions table in `SKILL.md` for the full list.
|
||||
|
||||
---
|
||||
|
||||
## Live Subscription Returns "Connecting"
|
||||
|
||||
**Symptoms:** `unraid(action="live", ...)` returns `{"status": "connecting"}`
|
||||
|
||||
**Explanation:** The persistent WebSocket subscription has not yet received its first event. Retry in a moment.
|
||||
|
||||
**Known issue:** `live/array_state` uses `arraySubscription` which has a known Unraid API bug (returns null for a non-nullable field). This subscription will always show "connecting."
|
||||
|
||||
**Event-driven subscriptions** (`live/notifications_overview`, `live/owner`, `live/server_status`, `live/ups_status`) only populate when the server emits a change event. If the server is idle, these may never populate during a session.
|
||||
|
||||
**Workaround for array state:** Use `unraid(action="system", subaction="array")` for a synchronous snapshot instead.
|
||||
|
||||
---
|
||||
|
||||
## Rate Limit Exceeded
|
||||
|
||||
**Limit:** 100 requests / 10 seconds
|
||||
|
||||
**Symptoms:** HTTP 429 or rate limit error
|
||||
|
||||
**Fix:** Space out requests. Avoid polling in tight loops. Use `live/` subscriptions for real-time data instead of polling `system/metrics` repeatedly.
|
||||
|
||||
---
|
||||
|
||||
## Log Path Rejected
|
||||
|
||||
**Error:** `Invalid log path`
|
||||
|
||||
**Valid log path prefixes:** `/var/log/`, `/boot/logs/`, `/mnt/`
|
||||
|
||||
Use `unraid(action="disk", subaction="log_files")` to list available logs before reading.
|
||||
|
||||
---
|
||||
|
||||
## Container Logs Not Available
|
||||
|
||||
Docker container stdout/stderr are **not accessible via the Unraid API**. SSH to the Unraid server and use `docker logs <container>` directly.
|
||||
|
||||
@@ -31,8 +31,12 @@ def make_tool_fn(
|
||||
|
||||
This wraps the repeated pattern of creating a test FastMCP instance,
|
||||
registering a tool, and extracting the inner function. Centralizing
|
||||
this avoids reliance on FastMCP's private `_tool_manager._tools` API
|
||||
in every test file.
|
||||
this avoids reliance on FastMCP's internal tool storage API in every
|
||||
test file.
|
||||
|
||||
FastMCP 3.x removed `_tool_manager._tools`; use `await mcp.get_tool()`
|
||||
instead. We run a small event loop here to keep the helper synchronous
|
||||
so callers don't need to change.
|
||||
|
||||
Args:
|
||||
module_path: Dotted import path to the tool module (e.g., "unraid_mcp.tools.info")
|
||||
@@ -48,4 +52,8 @@ def make_tool_fn(
|
||||
register_fn = getattr(module, register_fn_name)
|
||||
test_mcp = FastMCP("test")
|
||||
register_fn(test_mcp)
|
||||
return test_mcp._tool_manager._tools[tool_name].fn # type: ignore[union-attr]
|
||||
# FastMCP 3.x stores tools in providers[0]._components keyed as "tool:{name}@"
|
||||
# (the "@" suffix is the version separator with no version set).
|
||||
local_provider = test_mcp.providers[0]
|
||||
tool = local_provider._components[f"tool:{tool_name}@"]
|
||||
return tool.fn
|
||||
|
||||
0
tests/contract/__init__.py
Normal file
0
tests/contract/__init__.py
Normal file
976
tests/contract/test_response_contracts.py
Normal file
976
tests/contract/test_response_contracts.py
Normal file
@@ -0,0 +1,976 @@
|
||||
"""Contract tests: validate GraphQL response shapes with Pydantic models.
|
||||
|
||||
These tests document and enforce the response structure that callers of each
|
||||
tool action can rely on. A Pydantic ValidationError here means the tool's
|
||||
response shape changed — a breaking change for any downstream consumer.
|
||||
|
||||
Coverage:
|
||||
- Docker: list, details, networks, start/stop mutations
|
||||
- Info: overview, array, metrics, services, online, registration, network
|
||||
- Storage: shares, disks, disk_details, log_files
|
||||
- Notifications: overview, list, create
|
||||
"""
|
||||
|
||||
from collections.abc import Generator
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, ValidationError
|
||||
|
||||
from tests.conftest import make_tool_fn
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pydantic contract models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
# --- Docker ---
|
||||
|
||||
|
||||
class DockerContainer(BaseModel):
    """Minimal shape of a container as returned by docker/list.

    Only ``id``, ``names``, and ``state`` are required; all other fields
    are optional metadata. Field names are camelCase to mirror the
    GraphQL response keys verbatim.
    """

    id: str
    names: list[str]
    state: str
    image: str | None = None
    status: str | None = None
    autoStart: bool | None = None


class DockerContainerDetails(BaseModel):
    """Extended shape returned by docker/details.

    Superset of :class:`DockerContainer`; loosely-typed fields (``Any``)
    intentionally pin only key presence, not value type.
    """

    id: str
    names: list[str]
    state: str
    image: str | None = None
    imageId: str | None = None
    command: str | None = None
    created: Any = None
    ports: list[Any] | None = None
    sizeRootFs: Any = None
    labels: Any = None
    status: str | None = None
    autoStart: bool | None = None


class DockerNetwork(BaseModel):
    """Shape of a docker network entry (docker/networks).

    ``id`` and ``name`` are required; driver/scope are optional.
    """

    id: str
    name: str
    driver: str | None = None
    scope: str | None = None


class DockerMutationResult(BaseModel):
    """Shape returned by docker start/stop/pause/unpause mutations.

    ``subaction`` echoes the requested mutation (e.g. "start", "stop");
    ``container`` carries the post-mutation container state, if any.
    """

    success: bool
    subaction: str
    container: Any = None


class DockerListResult(BaseModel):
    """Top-level shape of docker/list response: {"containers": [...]}."""

    containers: list[Any]


class DockerNetworkListResult(BaseModel):
    """Top-level shape of docker/networks response: {"networks": [...]}."""

    networks: list[Any]
|
||||
|
||||
|
||||
# --- Info ---
|
||||
|
||||
|
||||
class InfoOverviewSummary(BaseModel):
    """Summary block inside info/overview response.

    All fields are optional; the summary is a human-oriented digest of the
    raw ``info`` payload.
    """

    hostname: str | None = None
    uptime: Any = None
    cpu: str | None = None
    os: str | None = None
    memory_summary: str | None = None


class InfoOverviewResult(BaseModel):
    """Top-level shape of info/overview: both keys are required dicts."""

    summary: dict[str, Any]
    details: dict[str, Any]


class ArraySummary(BaseModel):
    """Summary block inside info/array response.

    Disk/pool counts and ``overall_health`` are required — they are
    computed by the tool, not passed through from GraphQL.
    """

    state: str | None = None
    num_data_disks: int
    num_parity_disks: int
    num_cache_pools: int
    overall_health: str


class InfoArrayResult(BaseModel):
    """Top-level shape of info/array: {"summary": {...}, "details": {...}}."""

    summary: dict[str, Any]
    details: dict[str, Any]


class CpuMetrics(BaseModel):
    # percentTotal mirrors the GraphQL metrics.cpu key.
    percentTotal: float | None = None


class MemoryMetrics(BaseModel):
    # Sizes left as Any: units/typing are not pinned by this contract.
    total: Any = None
    used: Any = None
    free: Any = None
    available: Any = None
    buffcache: Any = None
    percentTotal: float | None = None


class InfoMetricsResult(BaseModel):
    """Top-level shape of info/metrics: {"cpu": {...}, "memory": {...}}."""

    cpu: dict[str, Any] | None = None
    memory: dict[str, Any] | None = None


class ServiceEntry(BaseModel):
    """Shape of a single service in info/services response.

    Only ``name`` is required.
    """

    name: str
    online: bool | None = None
    version: str | None = None


class InfoServicesResult(BaseModel):
    # Top-level shape of info/services: {"services": [...]}.
    services: list[Any]


class InfoOnlineResult(BaseModel):
    # Top-level shape of info/online: {"online": bool|None}.
    online: bool | None = None


class RegistrationResult(BaseModel):
    """Shape of info/registration response; every field is optional."""

    id: str | None = None
    type: str | None = None
    state: str | None = None
    expiration: Any = None


class InfoNetworkResult(BaseModel):
    """Shape of info/network response.

    ``accessUrls`` is required (may be empty); port/TLD/SSL settings are
    optional pass-through values.
    """

    accessUrls: list[Any]
    httpPort: Any = None
    httpsPort: Any = None
    localTld: str | None = None
    useSsl: Any = None
|
||||
|
||||
|
||||
# --- Storage ---
|
||||
|
||||
|
||||
class ShareEntry(BaseModel):
    """Shape of a single share in storage/shares response.

    ``id`` and ``name`` are required; capacity figures are optional and
    untyped by this contract.
    """

    id: str
    name: str
    free: Any = None
    used: Any = None
    size: Any = None


class StorageSharesResult(BaseModel):
    # Top-level shape of storage/shares: {"shares": [...]}.
    shares: list[Any]


class DiskEntry(BaseModel):
    """Minimal shape of a disk in storage/disks response; only ``id`` is required."""

    id: str
    device: str | None = None
    name: str | None = None


class StorageDisksResult(BaseModel):
    # Top-level shape of storage/disks: {"disks": [...]}.
    disks: list[Any]


class DiskDetailsSummary(BaseModel):
    """Summary block in storage/disk_details response.

    ``size_formatted`` and ``temperature`` are required strings produced
    by the tool's formatting (e.g. "35°C" or "N/A").
    """

    disk_id: str | None = None
    device: str | None = None
    name: str | None = None
    serial_number: str | None = None
    size_formatted: str
    temperature: str


class StorageDiskDetailsResult(BaseModel):
    """Top-level shape of storage/disk_details: {"summary": {...}, "details": {...}}."""

    summary: dict[str, Any]
    details: dict[str, Any]


class LogFileEntry(BaseModel):
    """Shape of a log file entry in storage/log_files response.

    ``name`` and ``path`` are required; size/mtime are optional.
    """

    name: str
    path: str
    size: Any = None
    modifiedAt: Any = None


class StorageLogFilesResult(BaseModel):
    # Top-level shape of storage/log_files: {"log_files": [...]} —
    # note the snake_case key, unlike the GraphQL "logFiles".
    log_files: list[Any]
|
||||
|
||||
|
||||
# --- Notifications ---
|
||||
|
||||
|
||||
class NotificationCountBucket(BaseModel):
    """Counts within a single severity bucket (unread or archive)."""

    info: int | None = None
    warning: int | None = None
    alert: int | None = None
    total: int | None = None


class NotificationOverviewResult(BaseModel):
    """Top-level shape of notifications/overview: unread + archive buckets."""

    unread: dict[str, Any] | None = None
    archive: dict[str, Any] | None = None


class NotificationEntry(BaseModel):
    """Shape of a single notification in notifications/list response.

    Only ``id`` is required; all display fields are optional.
    """

    id: str
    title: str | None = None
    subject: str | None = None
    description: str | None = None
    importance: str | None = None
    type: str | None = None
    timestamp: Any = None
    formattedTimestamp: Any = None
    link: Any = None


class NotificationListResult(BaseModel):
    # Top-level shape of notifications/list: {"notifications": [...]}.
    notifications: list[Any]


class NotificationCreateResult(BaseModel):
    """Top-level shape of notifications/create: success flag + created entry."""

    success: bool
    notification: dict[str, Any]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
# Dotted path of the GraphQL transport helper patched by every fixture below.
_GRAPHQL_TARGET = "unraid_mcp.tools.unraid.make_graphql_request"


def _patched_graphql() -> Generator[AsyncMock, None, None]:
    """Patch ``make_graphql_request`` with an AsyncMock and yield it.

    Shared implementation for the per-domain mock fixtures; the fixtures
    are kept as distinct names purely so test signatures read naturally.
    """
    with patch(_GRAPHQL_TARGET, new_callable=AsyncMock) as mock:
        yield mock


@pytest.fixture
def _docker_mock() -> Generator[AsyncMock, None, None]:
    """Mocked GraphQL transport for docker contract tests."""
    yield from _patched_graphql()


@pytest.fixture
def _info_mock() -> Generator[AsyncMock, None, None]:
    """Mocked GraphQL transport for info contract tests."""
    yield from _patched_graphql()


@pytest.fixture
def _storage_mock() -> Generator[AsyncMock, None, None]:
    """Mocked GraphQL transport for storage contract tests."""
    yield from _patched_graphql()


@pytest.fixture
def _notifications_mock() -> Generator[AsyncMock, None, None]:
    """Mocked GraphQL transport for notifications contract tests."""
    yield from _patched_graphql()


def _unraid_tool():
    """Register the consolidated unraid tool on a fresh server and return its fn."""
    return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")


# All four domains are served by the same consolidated tool; the per-domain
# names are aliases kept so call sites stay self-describing.
_docker_tool = _unraid_tool
_info_tool = _unraid_tool
_storage_tool = _unraid_tool
_notifications_tool = _unraid_tool
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Docker contract tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDockerListContract:
    """Contract: docker/list responses are always {"containers": [...]}."""

    async def test_list_result_has_containers_key(self, _docker_mock: AsyncMock) -> None:
        _docker_mock.return_value = {"docker": {"containers": []}}
        response = await _docker_tool()(action="docker", subaction="list")
        DockerListResult(**response)

    async def test_list_containers_conform_to_shape(self, _docker_mock: AsyncMock) -> None:
        payload = {
            "docker": {
                "containers": [
                    {"id": "c1", "names": ["nginx"], "state": "running", "image": "nginx:latest"},
                    {"id": "c2", "names": ["redis"], "state": "exited", "autoStart": False},
                ]
            }
        }
        _docker_mock.return_value = payload
        response = await _docker_tool()(action="docker", subaction="list")
        parsed = DockerListResult(**response)
        for entry in parsed.containers:
            DockerContainer(**entry)

    async def test_list_empty_containers_is_valid(self, _docker_mock: AsyncMock) -> None:
        _docker_mock.return_value = {"docker": {"containers": []}}
        response = await _docker_tool()(action="docker", subaction="list")
        assert DockerListResult(**response).containers == []

    async def test_list_container_minimal_fields_valid(self, _docker_mock: AsyncMock) -> None:
        """Only id, names, and state are needed for a container to validate."""
        minimal = {"id": "abc123", "names": ["plex"], "state": "running"}
        _docker_mock.return_value = {"docker": {"containers": [minimal]}}
        response = await _docker_tool()(action="docker", subaction="list")
        DockerContainer(**response["containers"][0])

    async def test_list_missing_names_fails_contract(self, _docker_mock: AsyncMock) -> None:
        """Dropping the required 'names' field must break validation."""
        _docker_mock.return_value = {
            "docker": {"containers": [{"id": "abc123", "state": "running"}]}
        }
        response = await _docker_tool()(action="docker", subaction="list")
        with pytest.raises(ValidationError):
            DockerContainer(**response["containers"][0])
|
||||
|
||||
|
||||
class TestDockerDetailsContract:
    """Contract: docker/details yields the raw container dict, unwrapped."""

    async def test_details_conforms_to_shape(self, _docker_mock: AsyncMock) -> None:
        cid = "a" * 64 + ":local"
        container = {
            "id": cid,
            "names": ["plex"],
            "state": "running",
            "image": "plexinc/pms:latest",
            "status": "Up 3 hours",
            "ports": [],
            "autoStart": True,
        }
        _docker_mock.return_value = {"docker": {"containers": [container]}}
        response = await _docker_tool()(action="docker", subaction="details", container_id=cid)
        DockerContainerDetails(**response)

    async def test_details_has_required_fields(self, _docker_mock: AsyncMock) -> None:
        cid = "b" * 64 + ":local"
        _docker_mock.return_value = {
            "docker": {"containers": [{"id": cid, "names": ["sonarr"], "state": "exited"}]}
        }
        response = await _docker_tool()(action="docker", subaction="details", container_id=cid)
        for key in ("id", "names", "state"):
            assert key in response
|
||||
|
||||
|
||||
class TestDockerNetworksContract:
    """Contract: docker/networks responses are {"networks": [...]}."""

    async def test_networks_result_has_networks_key(self, _docker_mock: AsyncMock) -> None:
        _docker_mock.return_value = {
            "docker": {"networks": [{"id": "net:1", "name": "bridge", "driver": "bridge"}]}
        }
        response = await _docker_tool()(action="docker", subaction="networks")
        DockerNetworkListResult(**response)

    async def test_network_entries_conform_to_shape(self, _docker_mock: AsyncMock) -> None:
        payload = {
            "docker": {
                "networks": [
                    {"id": "net:1", "name": "bridge", "driver": "bridge", "scope": "local"},
                    {"id": "net:2", "name": "host", "driver": "host", "scope": "local"},
                ]
            }
        }
        _docker_mock.return_value = payload
        response = await _docker_tool()(action="docker", subaction="networks")
        for entry in response["networks"]:
            DockerNetwork(**entry)

    async def test_empty_networks_is_valid(self, _docker_mock: AsyncMock) -> None:
        _docker_mock.return_value = {"docker": {"networks": []}}
        response = await _docker_tool()(action="docker", subaction="networks")
        assert DockerNetworkListResult(**response).networks == []
|
||||
|
||||
|
||||
class TestDockerMutationContract:
    """docker start/stop return {"success": bool, "subaction": str, "container": ...}.

    The response key is ``subaction`` (echoing the request argument), matching
    ``DockerMutationResult`` — the docstring previously mis-stated it as "action".
    """

    async def test_start_mutation_result_shape(self, _docker_mock: AsyncMock) -> None:
        cid = "c" * 64 + ":local"
        # First GraphQL call resolves the container; the second runs the mutation.
        _docker_mock.side_effect = [
            {"docker": {"containers": [{"id": cid, "names": ["plex"]}]}},
            {"docker": {"start": {"id": cid, "names": ["plex"], "state": "running"}}},
        ]
        result = await _docker_tool()(action="docker", subaction="start", container_id=cid)
        validated = DockerMutationResult(**result)
        assert validated.success is True
        assert validated.subaction == "start"

    async def test_stop_mutation_result_shape(self, _docker_mock: AsyncMock) -> None:
        cid = "d" * 64 + ":local"
        # Same two-call sequence as start: lookup, then the stop mutation.
        _docker_mock.side_effect = [
            {"docker": {"containers": [{"id": cid, "names": ["nginx"]}]}},
            {"docker": {"stop": {"id": cid, "names": ["nginx"], "state": "exited"}}},
        ]
        result = await _docker_tool()(action="docker", subaction="stop", container_id=cid)
        validated = DockerMutationResult(**result)
        assert validated.success is True
        assert validated.subaction == "stop"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Info contract tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestInfoOverviewContract:
    """Contract: info/overview responses are {"summary": {...}, "details": {...}}."""

    async def test_overview_has_summary_and_details(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "info": {
                "os": {"platform": "linux", "distro": "Unraid", "release": "6.12.0",
                       "hostname": "tootie", "uptime": 86400, "arch": "x64"},
                "cpu": {"manufacturer": "Intel", "brand": "Core i7-9700K",
                        "cores": 8, "threads": 8},
                "memory": {"layout": []},
            }
        }
        response = await _info_tool()(action="system", subaction="overview")
        parsed = InfoOverviewResult(**response)
        assert isinstance(parsed.summary, dict)
        assert isinstance(parsed.details, dict)

    async def test_overview_summary_contains_hostname(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "info": {
                "os": {"hostname": "myserver", "distro": "Unraid", "release": "6.12",
                       "platform": "linux", "arch": "x64", "uptime": 100},
                "cpu": {"manufacturer": "AMD", "brand": "Ryzen", "cores": 4, "threads": 8},
                "memory": {"layout": []},
            }
        }
        response = await _info_tool()(action="system", subaction="overview")
        summary = response["summary"]
        InfoOverviewSummary(**summary)
        assert summary["hostname"] == "myserver"

    async def test_overview_details_mirrors_raw_info(self, _info_mock: AsyncMock) -> None:
        raw = {
            "os": {"hostname": "srv", "distro": "Unraid", "release": "6",
                   "platform": "linux", "arch": "x64"},
            "cpu": {"manufacturer": "Intel", "brand": "Xeon", "cores": 16, "threads": 32},
            "memory": {"layout": []},
        }
        _info_mock.return_value = {"info": raw}
        response = await _info_tool()(action="system", subaction="overview")
        assert response["details"] == raw
|
||||
|
||||
|
||||
class TestInfoArrayContract:
    """Contract: info/array returns summary + details, with computed health."""

    async def test_array_result_shape(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "array": {
                "id": "array:1",
                "state": "STARTED",
                "capacity": {"kilobytes": {"free": 1000000, "used": 500000, "total": 1500000}},
                "parities": [{"id": "p1", "status": "DISK_OK"}],
                "disks": [{"id": "d1", "status": "DISK_OK"}, {"id": "d2", "status": "DISK_OK"}],
                "caches": [],
                "boot": None,
            }
        }
        response = await _info_tool()(action="system", subaction="array")
        parsed = InfoArrayResult(**response)
        assert isinstance(parsed.summary, dict)
        assert isinstance(parsed.details, dict)

    async def test_array_summary_contains_required_fields(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "array": {
                "state": "STARTED",
                "capacity": {"kilobytes": {"free": 500000, "used": 250000, "total": 750000}},
                "parities": [],
                "disks": [{"id": "d1", "status": "DISK_OK"}],
                "caches": [],
            }
        }
        response = await _info_tool()(action="system", subaction="array")
        ArraySummary(**response["summary"])

    async def test_array_health_overall_healthy(self, _info_mock: AsyncMock) -> None:
        healthy_disk = {"id": "d1", "status": "DISK_OK", "warning": None, "critical": None}
        healthy_parity = {"id": "p1", "status": "DISK_OK", "warning": None, "critical": None}
        _info_mock.return_value = {
            "array": {
                "state": "STARTED",
                "capacity": {"kilobytes": {"free": 1000000, "used": 0, "total": 1000000}},
                "parities": [healthy_parity],
                "disks": [healthy_disk],
                "caches": [],
            }
        }
        response = await _info_tool()(action="system", subaction="array")
        assert response["summary"]["overall_health"] == "HEALTHY"

    async def test_array_health_critical_with_failed_disk(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "array": {
                "state": "DEGRADED",
                "capacity": {"kilobytes": {"free": 0, "used": 0, "total": 0}},
                "parities": [{"id": "p1", "status": "DISK_DSBL"}],
                "disks": [],
                "caches": [],
            }
        }
        response = await _info_tool()(action="system", subaction="array")
        assert response["summary"]["overall_health"] == "CRITICAL"
|
||||
|
||||
|
||||
class TestInfoMetricsContract:
    """Contract: info/metrics responses are {"cpu": {...}, "memory": {...}}."""

    async def test_metrics_result_shape(self, _info_mock: AsyncMock) -> None:
        memory_payload = {
            "total": 16384,
            "used": 8192,
            "free": 4096,
            "available": 6144,
            "buffcache": 2048,
            "percentTotal": 50.0,
        }
        _info_mock.return_value = {
            "metrics": {"cpu": {"percentTotal": 12.5}, "memory": memory_payload}
        }
        response = await _info_tool()(action="system", subaction="metrics")
        parsed = InfoMetricsResult(**response)
        assert parsed.cpu is not None
        assert parsed.memory is not None

    async def test_metrics_cpu_percent_in_range(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "metrics": {"cpu": {"percentTotal": 75.3}, "memory": {"percentTotal": 60.0}}
        }
        response = await _info_tool()(action="system", subaction="metrics")
        assert 0.0 <= response["cpu"]["percentTotal"] <= 100.0
|
||||
|
||||
|
||||
class TestInfoServicesContract:
    """Contract: info/services responses are {"services": [...]}."""

    async def test_services_result_shape(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "services": [
                {"name": "nginx", "online": True, "version": "1.25"},
                {"name": "docker", "online": True, "version": "24.0"},
            ]
        }
        response = await _info_tool()(action="system", subaction="services")
        parsed = InfoServicesResult(**response)
        for entry in parsed.services:
            ServiceEntry(**entry)

    async def test_services_empty_list_is_valid(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {"services": []}
        response = await _info_tool()(action="system", subaction="services")
        InfoServicesResult(**response)
        assert response["services"] == []
|
||||
|
||||
|
||||
class TestInfoOnlineContract:
    """Contract: info/online responses are {"online": bool|None}."""

    async def test_online_true_shape(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {"online": True}
        response = await _info_tool()(action="system", subaction="online")
        assert InfoOnlineResult(**response).online is True

    async def test_online_false_shape(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {"online": False}
        response = await _info_tool()(action="system", subaction="online")
        assert InfoOnlineResult(**response).online is False
|
||||
|
||||
|
||||
class TestInfoNetworkContract:
    """Contract: info/network exposes access URLs plus port configuration."""

    async def test_network_result_shape(self, _info_mock: AsyncMock) -> None:
        server = {
            "id": "s1",
            "lanip": "192.168.1.10",
            "wanip": "1.2.3.4",
            "localurl": "http://tower.local",
            "remoteurl": "https://myunraid.net/s1",
        }
        _info_mock.return_value = {
            "servers": [server],
            "vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"},
        }
        response = await _info_tool()(action="system", subaction="network")
        parsed = InfoNetworkResult(**response)
        assert isinstance(parsed.accessUrls, list)

    async def test_network_empty_servers_still_valid(self, _info_mock: AsyncMock) -> None:
        _info_mock.return_value = {
            "servers": [],
            "vars": {"port": 80, "portssl": 443, "localTld": "local", "useSsl": "no"},
        }
        response = await _info_tool()(action="system", subaction="network")
        assert InfoNetworkResult(**response).accessUrls == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Storage contract tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStorageSharesContract:
    """Contract: storage/shares responses are {"shares": [...]}."""

    async def test_shares_result_shape(self, _storage_mock: AsyncMock) -> None:
        payload = {
            "shares": [
                {"id": "share:1", "name": "media", "free": 500000, "used": 100000, "size": 600000},
                {"id": "share:2", "name": "appdata", "free": 200000, "used": 50000, "size": 250000},
            ]
        }
        _storage_mock.return_value = payload
        response = await _storage_tool()(action="disk", subaction="shares")
        parsed = StorageSharesResult(**response)
        for entry in parsed.shares:
            ShareEntry(**entry)

    async def test_shares_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {"shares": []}
        response = await _storage_tool()(action="disk", subaction="shares")
        StorageSharesResult(**response)
        assert response["shares"] == []

    async def test_shares_missing_name_fails_contract(self, _storage_mock: AsyncMock) -> None:
        """A share lacking the required 'name' must fail contract validation."""
        _storage_mock.return_value = {"shares": [{"id": "share:1", "free": 100}]}
        response = await _storage_tool()(action="disk", subaction="shares")
        with pytest.raises(ValidationError):
            ShareEntry(**response["shares"][0])
|
||||
|
||||
|
||||
class TestStorageDisksContract:
    """Contract: storage/disks responses are {"disks": [...]}."""

    async def test_disks_result_shape(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {
            "disks": [
                {"id": "disk:1", "device": "sda", "name": "WD_RED_4TB"},
                {"id": "disk:2", "device": "sdb", "name": "Seagate_8TB"},
            ]
        }
        response = await _storage_tool()(action="disk", subaction="disks")
        parsed = StorageDisksResult(**response)
        for entry in parsed.disks:
            DiskEntry(**entry)

    async def test_disks_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {"disks": []}
        response = await _storage_tool()(action="disk", subaction="disks")
        StorageDisksResult(**response)
        assert response["disks"] == []
|
||||
|
||||
|
||||
class TestStorageDiskDetailsContract:
    """Contract: storage/disk_details responses are {"summary": {...}, "details": {...}}."""

    @staticmethod
    def _disk(disk_id: str, device: str, name: str, serial: str, size: int, temp: int | None) -> dict:
        """Build a mock GraphQL disk payload."""
        return {
            "id": disk_id,
            "device": device,
            "name": name,
            "serialNum": serial,
            "size": size,
            "temperature": temp,
        }

    async def test_disk_details_result_shape(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {
            "disk": self._disk("disk:1", "sda", "WD_RED_4TB", "WD-12345678", 4000000000, 35)
        }
        response = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:1")
        parsed = StorageDiskDetailsResult(**response)
        assert isinstance(parsed.summary, dict)
        assert isinstance(parsed.details, dict)

    async def test_disk_details_summary_has_required_fields(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {
            "disk": self._disk("disk:2", "sdb", "Seagate", "ST-ABC", 8000000000, 40)
        }
        response = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:2")
        DiskDetailsSummary(**response["summary"])

    async def test_disk_details_temperature_formatted(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {
            "disk": self._disk("disk:3", "sdc", "MyDisk", "XYZ", 2000000000, 38)
        }
        response = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:3")
        assert "°C" in response["summary"]["temperature"]

    async def test_disk_details_no_temperature_shows_na(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {
            "disk": self._disk("disk:4", "sdd", "NoDisk", "000", 1000000000, None)
        }
        response = await _storage_tool()(action="disk", subaction="disk_details", disk_id="disk:4")
        assert response["summary"]["temperature"] == "N/A"
|
||||
|
||||
|
||||
class TestStorageLogFilesContract:
    """Contract: storage/log_files responses are {"log_files": [...]}."""

    async def test_log_files_result_shape(self, _storage_mock: AsyncMock) -> None:
        # GraphQL key is camelCase "logFiles"; the tool re-keys to snake_case.
        _storage_mock.return_value = {
            "logFiles": [
                {"name": "syslog", "path": "/var/log/syslog",
                 "size": 1024, "modifiedAt": "2026-03-15"},
                {"name": "messages", "path": "/var/log/messages",
                 "size": 512, "modifiedAt": "2026-03-14"},
            ]
        }
        response = await _storage_tool()(action="disk", subaction="log_files")
        parsed = StorageLogFilesResult(**response)
        for entry in parsed.log_files:
            LogFileEntry(**entry)

    async def test_log_files_empty_list_is_valid(self, _storage_mock: AsyncMock) -> None:
        _storage_mock.return_value = {"logFiles": []}
        response = await _storage_tool()(action="disk", subaction="log_files")
        StorageLogFilesResult(**response)
        assert response["log_files"] == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Notifications contract tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNotificationsOverviewContract:
    """Contract: notifications/overview responses are {"unread": {...}, "archive": {...}}."""

    @staticmethod
    def _overview(unread: dict, archive: dict) -> dict:
        """Wrap severity buckets in the GraphQL notifications envelope."""
        return {"notifications": {"overview": {"unread": unread, "archive": archive}}}

    async def test_overview_result_shape(self, _notifications_mock: AsyncMock) -> None:
        _notifications_mock.return_value = self._overview(
            {"info": 2, "warning": 1, "alert": 0, "total": 3},
            {"info": 10, "warning": 5, "alert": 2, "total": 17},
        )
        response = await _notifications_tool()(action="notification", subaction="overview")
        parsed = NotificationOverviewResult(**response)
        assert parsed.unread is not None
        assert parsed.archive is not None

    async def test_overview_unread_bucket_conforms(self, _notifications_mock: AsyncMock) -> None:
        _notifications_mock.return_value = self._overview(
            {"info": 0, "warning": 0, "alert": 1, "total": 1},
            {"info": 0, "warning": 0, "alert": 0, "total": 0},
        )
        response = await _notifications_tool()(action="notification", subaction="overview")
        NotificationCountBucket(**response["unread"])
        NotificationCountBucket(**response["archive"])

    async def test_overview_empty_counts_valid(self, _notifications_mock: AsyncMock) -> None:
        empty = {"info": 0, "warning": 0, "alert": 0, "total": 0}
        _notifications_mock.return_value = self._overview(dict(empty), dict(empty))
        response = await _notifications_tool()(action="notification", subaction="overview")
        NotificationOverviewResult(**response)
|
||||
|
||||
|
||||
class TestNotificationsListContract:
    """Contract: notifications/list responses are {"notifications": [...]}."""

    async def test_list_result_shape(self, _notifications_mock: AsyncMock) -> None:
        entry = {
            "id": "notif:1",
            "title": "Array degraded",
            "subject": "Storage alert",
            "description": "Disk 3 failed",
            "importance": "ALERT",
            "type": "UNREAD",
            "timestamp": 1741000000,
            "formattedTimestamp": "Mar 15 2026",
            "link": None,
        }
        _notifications_mock.return_value = {"notifications": {"list": [entry]}}
        response = await _notifications_tool()(action="notification", subaction="list")
        parsed = NotificationListResult(**response)
        for notif in parsed.notifications:
            NotificationEntry(**notif)

    async def test_list_empty_notifications_valid(self, _notifications_mock: AsyncMock) -> None:
        _notifications_mock.return_value = {"notifications": {"list": []}}
        response = await _notifications_tool()(action="notification", subaction="list")
        NotificationListResult(**response)
        assert response["notifications"] == []

    async def test_list_notification_missing_id_fails_contract(
        self, _notifications_mock: AsyncMock
    ) -> None:
        """A notification lacking the required 'id' must fail contract validation."""
        _notifications_mock.return_value = {
            "notifications": {"list": [{"title": "No ID here", "importance": "INFO"}]}
        }
        response = await _notifications_tool()(action="notification", subaction="list")
        with pytest.raises(ValidationError):
            NotificationEntry(**response["notifications"][0])
|
||||
|
||||
|
||||
class TestNotificationsCreateContract:
|
||||
"""notifications/create returns {"success": bool, "notification": {...}}."""
|
||||
|
||||
async def test_create_result_shape(self, _notifications_mock: AsyncMock) -> None:
|
||||
_notifications_mock.return_value = {
|
||||
"createNotification": {
|
||||
"id": "notif:new",
|
||||
"title": "Test notification",
|
||||
"importance": "INFO",
|
||||
}
|
||||
}
|
||||
result = await _notifications_tool()(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test notification",
|
||||
subject="Test subject",
|
||||
description="This is a test",
|
||||
importance="INFO",
|
||||
)
|
||||
validated = NotificationCreateResult(**result)
|
||||
assert validated.success is True
|
||||
assert "id" in validated.notification
|
||||
|
||||
async def test_create_notification_has_id(self, _notifications_mock: AsyncMock) -> None:
|
||||
_notifications_mock.return_value = {
|
||||
"createNotification": {"id": "notif:42", "title": "Alert!", "importance": "ALERT"}
|
||||
}
|
||||
result = await _notifications_tool()(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Alert!",
|
||||
subject="Critical issue",
|
||||
description="Something went wrong",
|
||||
importance="ALERT",
|
||||
)
|
||||
assert result["notification"]["id"] == "notif:42"
|
||||
assert result["notification"]["importance"] == "ALERT"
|
||||
File diff suppressed because it is too large
Load Diff
@@ -7,7 +7,7 @@ data management without requiring a live Unraid server.
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
@@ -16,6 +16,7 @@ import websockets.exceptions
|
||||
|
||||
from unraid_mcp.subscriptions.manager import SubscriptionManager
|
||||
|
||||
|
||||
pytestmark = pytest.mark.integration
|
||||
|
||||
|
||||
@@ -42,9 +43,7 @@ class FakeWebSocket:
|
||||
subprotocol: str = "graphql-transport-ws",
|
||||
) -> None:
|
||||
self.subprotocol = subprotocol
|
||||
self._messages = [
|
||||
json.dumps(m) if isinstance(m, dict) else m for m in messages
|
||||
]
|
||||
self._messages = [json.dumps(m) if isinstance(m, dict) else m for m in messages]
|
||||
self._index = 0
|
||||
self.send = AsyncMock()
|
||||
|
||||
@@ -53,9 +52,7 @@ class FakeWebSocket:
|
||||
# Simulate normal connection close when messages exhausted
|
||||
from websockets.frames import Close
|
||||
|
||||
raise websockets.exceptions.ConnectionClosed(
|
||||
Close(1000, "normal closure"), None
|
||||
)
|
||||
raise websockets.exceptions.ConnectionClosed(Close(1000, "normal closure"), None)
|
||||
msg = self._messages[self._index]
|
||||
self._index += 1
|
||||
return msg
|
||||
@@ -83,7 +80,7 @@ SAMPLE_QUERY = "subscription { test { value } }"
|
||||
|
||||
# Shared patch targets
|
||||
_WS_CONNECT = "unraid_mcp.subscriptions.manager.websockets.connect"
|
||||
_API_URL = "unraid_mcp.subscriptions.manager.UNRAID_API_URL"
|
||||
_API_URL = "unraid_mcp.subscriptions.utils.UNRAID_API_URL"
|
||||
_API_KEY = "unraid_mcp.subscriptions.manager.UNRAID_API_KEY"
|
||||
_SSL_CTX = "unraid_mcp.subscriptions.manager.build_ws_ssl_context"
|
||||
_SLEEP = "unraid_mcp.subscriptions.manager.asyncio.sleep"
|
||||
@@ -95,12 +92,11 @@ _SLEEP = "unraid_mcp.subscriptions.manager.asyncio.sleep"
|
||||
|
||||
|
||||
class TestSubscriptionManagerInit:
|
||||
|
||||
def test_default_state(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
assert mgr.active_subscriptions == {}
|
||||
assert mgr.resource_data == {}
|
||||
assert mgr.websocket is None
|
||||
assert not hasattr(mgr, "websocket")
|
||||
|
||||
def test_default_auto_start_enabled(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
@@ -136,7 +132,6 @@ class TestSubscriptionManagerInit:
|
||||
|
||||
|
||||
class TestConnectionLifecycle:
|
||||
|
||||
async def test_start_subscription_creates_task(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
ws = FakeWebSocket([{"type": "connection_ack"}])
|
||||
@@ -241,16 +236,17 @@ def _loop_patches(
|
||||
|
||||
|
||||
class TestProtocolHandling:
|
||||
|
||||
async def test_connection_init_sends_auth(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "next", "id": "test_sub", "payload": {"data": {"v": 1}}},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "next", "id": "test_sub", "payload": {"data": {"v": 1}}},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws, api_key="my-secret-key")
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -258,7 +254,7 @@ class TestProtocolHandling:
|
||||
first_send = ws.send.call_args_list[0]
|
||||
init_msg = json.loads(first_send[0][0])
|
||||
assert init_msg["type"] == "connection_init"
|
||||
assert init_msg["payload"]["headers"]["X-API-Key"] == "my-secret-key"
|
||||
assert init_msg["payload"]["x-api-key"] == "my-secret-key"
|
||||
|
||||
async def test_subscribe_uses_subscribe_type_for_transport_ws(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
@@ -297,9 +293,11 @@ class TestProtocolHandling:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_error", "payload": {"message": "Invalid API key"}},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_error", "payload": {"message": "Invalid API key"}},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws, api_key="bad-key")
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -311,10 +309,12 @@ class TestProtocolHandling:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws, api_key="")
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -331,7 +331,6 @@ class TestProtocolHandling:
|
||||
|
||||
|
||||
class TestDataReception:
|
||||
|
||||
async def test_next_message_stores_resource_data(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
@@ -395,18 +394,19 @@ class TestDataReception:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "ping"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "ping"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
|
||||
pong_sent = any(
|
||||
json.loads(call[0][0]).get("type") == "pong"
|
||||
for call in ws.send.call_args_list
|
||||
json.loads(call[0][0]).get("type") == "pong" for call in ws.send.call_args_list
|
||||
)
|
||||
assert pong_sent, "Expected pong response to be sent"
|
||||
|
||||
@@ -414,11 +414,13 @@ class TestDataReception:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "error", "id": "test_sub", "payload": {"message": "bad query"}},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "error", "id": "test_sub", "payload": {"message": "bad query"}},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -431,10 +433,12 @@ class TestDataReception:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -464,12 +468,14 @@ class TestDataReception:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "ka"},
|
||||
{"type": "pong"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "ka"},
|
||||
{"type": "pong"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -478,11 +484,13 @@ class TestDataReception:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
"not valid json {{{",
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
"not valid json {{{",
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -494,7 +502,6 @@ class TestDataReception:
|
||||
|
||||
|
||||
class TestReconnection:
|
||||
|
||||
async def test_max_retries_exceeded_stops_loop(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 2
|
||||
@@ -557,10 +564,12 @@ class TestReconnection:
|
||||
mgr.max_reconnect_attempts = 10
|
||||
mgr.reconnect_attempts["test_sub"] = 5
|
||||
|
||||
ws = FakeWebSocket([
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
])
|
||||
ws = FakeWebSocket(
|
||||
[
|
||||
{"type": "connection_ack"},
|
||||
{"type": "complete", "id": "test_sub"},
|
||||
]
|
||||
)
|
||||
p = _loop_patches(ws)
|
||||
with p[0], p[1], p[2], p[3], p[4]:
|
||||
await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {})
|
||||
@@ -621,9 +630,7 @@ class TestReconnection:
|
||||
with (
|
||||
patch(
|
||||
_WS_CONNECT,
|
||||
side_effect=websockets.exceptions.ConnectionClosed(
|
||||
Close(1006, "abnormal"), None
|
||||
),
|
||||
side_effect=websockets.exceptions.ConnectionClosed(Close(1006, "abnormal"), None),
|
||||
),
|
||||
patch(_API_URL, "https://test.local"),
|
||||
patch(_API_KEY, "key"),
|
||||
@@ -642,7 +649,6 @@ class TestReconnection:
|
||||
|
||||
|
||||
class TestWebSocketURLConstruction:
|
||||
|
||||
async def test_https_converted_to_wss(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.max_reconnect_attempts = 1
|
||||
@@ -719,21 +725,20 @@ class TestWebSocketURLConstruction:
|
||||
|
||||
|
||||
class TestResourceData:
|
||||
|
||||
def test_get_resource_data_returns_none_when_empty(self) -> None:
|
||||
async def test_get_resource_data_returns_none_when_empty(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
assert mgr.get_resource_data("nonexistent") is None
|
||||
assert await mgr.get_resource_data("nonexistent") is None
|
||||
|
||||
def test_get_resource_data_returns_stored_data(self) -> None:
|
||||
async def test_get_resource_data_returns_stored_data(self) -> None:
|
||||
from unraid_mcp.core.types import SubscriptionData
|
||||
|
||||
mgr = SubscriptionManager()
|
||||
mgr.resource_data["test"] = SubscriptionData(
|
||||
data={"key": "value"},
|
||||
last_updated=datetime.now(),
|
||||
last_updated=datetime.now(UTC),
|
||||
subscription_type="test",
|
||||
)
|
||||
result = mgr.get_resource_data("test")
|
||||
result = await mgr.get_resource_data("test")
|
||||
assert result == {"key": "value"}
|
||||
|
||||
def test_list_active_subscriptions_empty(self) -> None:
|
||||
@@ -754,47 +759,46 @@ class TestResourceData:
|
||||
|
||||
|
||||
class TestSubscriptionStatus:
|
||||
|
||||
def test_status_includes_all_configured_subscriptions(self) -> None:
|
||||
async def test_status_includes_all_configured_subscriptions(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
for name in mgr.subscription_configs:
|
||||
assert name in status
|
||||
|
||||
def test_status_default_connection_state(self) -> None:
|
||||
async def test_status_default_connection_state(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
for sub_status in status.values():
|
||||
assert sub_status["runtime"]["connection_state"] == "not_started"
|
||||
|
||||
def test_status_shows_active_flag(self) -> None:
|
||||
async def test_status_shows_active_flag(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.active_subscriptions["logFileSubscription"] = MagicMock()
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
assert status["logFileSubscription"]["runtime"]["active"] is True
|
||||
|
||||
def test_status_shows_data_availability(self) -> None:
|
||||
async def test_status_shows_data_availability(self) -> None:
|
||||
from unraid_mcp.core.types import SubscriptionData
|
||||
|
||||
mgr = SubscriptionManager()
|
||||
mgr.resource_data["logFileSubscription"] = SubscriptionData(
|
||||
data={"log": "content"},
|
||||
last_updated=datetime.now(),
|
||||
last_updated=datetime.now(UTC),
|
||||
subscription_type="logFileSubscription",
|
||||
)
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
assert status["logFileSubscription"]["data"]["available"] is True
|
||||
|
||||
def test_status_shows_error_info(self) -> None:
|
||||
async def test_status_shows_error_info(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.last_error["logFileSubscription"] = "Test error message"
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
assert status["logFileSubscription"]["runtime"]["last_error"] == "Test error message"
|
||||
|
||||
def test_status_reconnect_attempts_tracked(self) -> None:
|
||||
async def test_status_reconnect_attempts_tracked(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.reconnect_attempts["logFileSubscription"] = 3
|
||||
status = mgr.get_subscription_status()
|
||||
status = await mgr.get_subscription_status()
|
||||
assert status["logFileSubscription"]["runtime"]["reconnect_attempts"] == 3
|
||||
|
||||
|
||||
@@ -804,7 +808,6 @@ class TestSubscriptionStatus:
|
||||
|
||||
|
||||
class TestAutoStart:
|
||||
|
||||
async def test_auto_start_disabled_skips_all(self) -> None:
|
||||
mgr = SubscriptionManager()
|
||||
mgr.auto_start_enabled = False
|
||||
@@ -852,7 +855,6 @@ class TestAutoStart:
|
||||
|
||||
|
||||
class TestSSLContext:
|
||||
|
||||
def test_non_wss_returns_none(self) -> None:
|
||||
from unraid_mcp.subscriptions.utils import build_ws_ssl_context
|
||||
|
||||
|
||||
151
tests/mcporter/README.md
Normal file
151
tests/mcporter/README.md
Normal file
@@ -0,0 +1,151 @@
|
||||
# mcporter Integration Tests
|
||||
|
||||
Live integration smoke-tests for the unraid-mcp server, exercising real API calls via [mcporter](https://github.com/mcporter/mcporter).
|
||||
|
||||
---
|
||||
|
||||
## Two Scripts, Two Transports
|
||||
|
||||
| | `test-tools.sh` | `test-actions.sh` |
|
||||
|-|-----------------|-------------------|
|
||||
| **Transport** | stdio | HTTP |
|
||||
| **Server required** | No — launched ad-hoc per call | Yes — must be running at `$MCP_URL` |
|
||||
| **Flags** | `--timeout-ms N`, `--parallel`, `--verbose` | positional `[MCP_URL]` |
|
||||
| **Coverage** | 10 tools (read-only actions only) | 11 tools (all non-destructive actions) |
|
||||
| **Use case** | CI / offline local check | Live server smoke-test |
|
||||
|
||||
### `test-tools.sh` — stdio, no running server needed
|
||||
|
||||
```bash
|
||||
./tests/mcporter/test-tools.sh # sequential, 25s timeout
|
||||
./tests/mcporter/test-tools.sh --parallel # parallel suites
|
||||
./tests/mcporter/test-tools.sh --timeout-ms 10000 # tighter timeout
|
||||
./tests/mcporter/test-tools.sh --verbose # print raw responses
|
||||
```
|
||||
|
||||
Launches `uv run unraid-mcp-server` in stdio mode for each tool call. Requires `mcporter`, `uv`, and `python3` in `PATH`. Good for CI pipelines — no persistent server process needed.
|
||||
|
||||
### `test-actions.sh` — HTTP, requires a live server
|
||||
|
||||
```bash
|
||||
./tests/mcporter/test-actions.sh # default: http://localhost:6970/mcp
|
||||
./tests/mcporter/test-actions.sh http://10.1.0.2:6970/mcp # explicit URL
|
||||
UNRAID_MCP_URL=http://10.1.0.2:6970/mcp ./tests/mcporter/test-actions.sh
|
||||
```
|
||||
|
||||
Connects to an already-running streamable-http server. Covers all read-only actions across 10 tools (`unraid_settings` is all-mutations and skipped; all destructive mutations are explicitly skipped).
|
||||
|
||||
---
|
||||
|
||||
## What `test-actions.sh` Tests
|
||||
|
||||
### Phase 1 — Param-free reads
|
||||
|
||||
All actions requiring no arguments beyond `action` itself.
|
||||
|
||||
| Tool | Actions tested |
|
||||
|------|----------------|
|
||||
| `unraid_info` | `overview`, `array`, `network`, `registration`, `connect`, `variables`, `metrics`, `services`, `display`, `config`, `online`, `owner`, `settings`, `server`, `servers`, `flash`, `ups_devices`, `ups_device`, `ups_config` |
|
||||
| `unraid_array` | `parity_status` |
|
||||
| `unraid_storage` | `disks`, `shares`, `unassigned`, `log_files` |
|
||||
| `unraid_docker` | `list`, `networks`, `port_conflicts`, `check_updates`, `sync_templates`, `refresh_digests` |
|
||||
| `unraid_vm` | `list` |
|
||||
| `unraid_notifications` | `overview`, `list`, `warnings`, `recalculate` |
|
||||
| `unraid_rclone` | `list_remotes`, `config_form` |
|
||||
| `unraid_users` | `me` |
|
||||
| `unraid_keys` | `list` |
|
||||
| `unraid_health` | `check`, `test_connection`, `diagnose` |
|
||||
| `unraid_settings` | *(all 9 actions skipped — mutations only)* |
|
||||
|
||||
### Phase 2 — ID-discovered reads
|
||||
|
||||
IDs are extracted from Phase 1 responses and used for actions requiring a specific resource. Each is skipped if Phase 1 returned no matching resources.
|
||||
|
||||
| Action | Source of ID |
|
||||
|--------|--------------|
|
||||
| `docker: details` | first container from `docker: list` |
|
||||
| `docker: logs` | first container from `docker: list` |
|
||||
| `docker: network_details` | first network from `docker: networks` |
|
||||
| `storage: disk_details` | first disk from `storage: disks` |
|
||||
| `storage: logs` | first path from `storage: log_files` |
|
||||
| `vm: details` | first VM from `vm: list` |
|
||||
| `keys: get` | first key from `keys: list` |
|
||||
|
||||
### Skipped actions (and why)
|
||||
|
||||
| Label | Meaning |
|
||||
|-------|---------|
|
||||
| `destructive (confirm=True required)` | Permanently modifies or deletes data |
|
||||
| `mutation — state-changing` | Modifies live system state (container/VM lifecycle, settings) |
|
||||
| `mutation — creates …` | Creates a new resource |
|
||||
|
||||
**Full skip list:**
|
||||
- `unraid_info`: `update_server`, `update_ssh`
|
||||
- `unraid_array`: `parity_start`, `parity_pause`, `parity_resume`, `parity_cancel`
|
||||
- `unraid_storage`: `flash_backup`
|
||||
- `unraid_docker`: `start`, `stop`, `restart`, `pause`, `unpause`, `update`, `remove`, `update_all`, `create_folder`, `set_folder_children`, `delete_entries`, `move_to_folder`, `move_to_position`, `rename_folder`, `create_folder_with_items`, `update_view_prefs`, `reset_template_mappings`
|
||||
- `unraid_vm`: `start`, `stop`, `pause`, `resume`, `reboot`, `force_stop`, `reset`
|
||||
- `unraid_notifications`: `create`, `create_unique`, `archive`, `unread`, `archive_all`, `archive_many`, `unarchive_many`, `unarchive_all`, `delete`, `delete_archived`
|
||||
- `unraid_rclone`: `create_remote`, `delete_remote`
|
||||
- `unraid_keys`: `create`, `update`, `delete`
|
||||
- `unraid_settings`: all 9 actions
|
||||
|
||||
### Output format
|
||||
|
||||
```
|
||||
<action label> PASS
|
||||
<action label> FAIL
|
||||
<first 3 lines of error detail>
|
||||
<action label> SKIP (reason)
|
||||
|
||||
Results: 42 passed 0 failed 37 skipped (79 total)
|
||||
```
|
||||
|
||||
Exit code `0` when all executed tests pass, `1` if any fail.
|
||||
|
||||
---
|
||||
|
||||
## Destructive Actions
|
||||
|
||||
Neither script executes destructive actions. They are explicitly `skip_test`-ed with reason `"destructive (confirm=True required)"`.
|
||||
|
||||
All destructive actions require `confirm=True` at the call site. There is no environment variable gate — `confirm` is the sole guard.
|
||||
|
||||
### Safe Testing Strategy
|
||||
|
||||
| Strategy | When to use |
|
||||
|----------|-------------|
|
||||
| **Create → destroy** | Action has a create counterpart (keys, notifications, rclone remotes, docker folders) |
|
||||
| **No-op apply** | Action mutates config but can be re-applied with current values unchanged (`update_ssh`) |
|
||||
| **Dedicated test remote** | Action requires a remote target (`flash_backup`) |
|
||||
| **Test VM** | Action requires a live VM (`force_stop`, `reset`) |
|
||||
| **Mock/safety audit only** | Global blast radius, no safe isolation (`update_all`, `reset_template_mappings`, `setup_remote_access`, `configure_ups`) |
|
||||
| **Secondary server only** | Run on `shart` (10.1.0.3), never `tootie` (10.1.0.2) |
|
||||
|
||||
For exact per-action mcporter commands, see [`docs/DESTRUCTIVE_ACTIONS.md`](../../docs/DESTRUCTIVE_ACTIONS.md).
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
```bash
|
||||
# mcporter CLI
|
||||
npm install -g mcporter
|
||||
|
||||
# uv (for test-tools.sh stdio mode)
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
|
||||
# python3 — used for inline JSON extraction
|
||||
python3 --version # 3.12+
|
||||
|
||||
# Running server (for test-actions.sh only)
|
||||
docker compose up -d
|
||||
# or
|
||||
uv run unraid-mcp-server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Cleanup
|
||||
|
||||
`test-actions.sh` connects to an existing server and leaves it running; it creates no temporary files. `test-tools.sh` spawns stdio server subprocesses per call — they exit when mcporter finishes each invocation — and may write a timestamped log file under `${TMPDIR:-/tmp}`. Neither script leaves background processes.
|
||||
407
tests/mcporter/test-actions.sh
Executable file
407
tests/mcporter/test-actions.sh
Executable file
@@ -0,0 +1,407 @@
|
||||
#!/usr/bin/env bash
|
||||
# test-actions.sh — Test all non-destructive Unraid MCP actions via mcporter
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/test-actions.sh [MCP_URL]
|
||||
#
|
||||
# Default MCP_URL: http://localhost:6970/mcp
|
||||
# Skips: destructive (confirm=True required), state-changing mutations,
|
||||
# and actions requiring IDs not yet discovered.
|
||||
#
|
||||
# Phase 1: param-free reads
|
||||
# Phase 2: ID-discovered reads (container, network, disk, vm, key, log)
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
MCP_URL="${1:-${UNRAID_MCP_URL:-http://localhost:6970/mcp}}"
|
||||
|
||||
# ── colours ──────────────────────────────────────────────────────────────────
|
||||
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
|
||||
CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m'
|
||||
|
||||
PASS=0; FAIL=0; SKIP=0
|
||||
declare -a FAILED_TESTS=()
|
||||
|
||||
# ── helpers ───────────────────────────────────────────────────────────────────
|
||||
|
||||
mcall() {
|
||||
# mcall <tool> <json-args>
|
||||
local tool="$1" args="$2"
|
||||
mcporter call \
|
||||
--http-url "$MCP_URL" \
|
||||
--allow-http \
|
||||
--tool "$tool" \
|
||||
--args "$args" \
|
||||
--output json \
|
||||
2>&1
|
||||
}
|
||||
|
||||
_check_output() {
|
||||
# Returns 0 if output looks like a successful JSON response, 1 otherwise.
|
||||
local output="$1" exit_code="$2"
|
||||
[[ $exit_code -ne 0 ]] && return 1
|
||||
echo "$output" | python3 -c "
|
||||
import json, sys
|
||||
try:
|
||||
d = json.load(sys.stdin)
|
||||
if isinstance(d, dict) and (d.get('isError') or d.get('error') or 'ToolError' in str(d)):
|
||||
sys.exit(1)
|
||||
except Exception:
|
||||
pass
|
||||
sys.exit(0)
|
||||
" 2>/dev/null
|
||||
}
|
||||
|
||||
run_test() {
|
||||
# Print result; do NOT echo the JSON body (kept quiet for readability).
|
||||
local label="$1" tool="$2" args="$3"
|
||||
printf " %-60s" "$label"
|
||||
local output exit_code=0
|
||||
output=$(mcall "$tool" "$args" 2>&1) || exit_code=$?
|
||||
if _check_output "$output" "$exit_code"; then
|
||||
echo -e "${GREEN}PASS${NC}"
|
||||
((PASS++)) || true
|
||||
else
|
||||
echo -e "${RED}FAIL${NC}"
|
||||
((FAIL++)) || true
|
||||
FAILED_TESTS+=("$label")
|
||||
# Show first 3 lines of error detail, indented
|
||||
echo "$output" | head -3 | sed 's/^/ /'
|
||||
fi
|
||||
}
|
||||
|
||||
run_test_capture() {
|
||||
# Like run_test but echoes raw JSON to stdout for ID extraction by caller.
|
||||
# Status lines go to stderr so the caller's $() captures only clean JSON.
|
||||
local label="$1" tool="$2" args="$3"
|
||||
local output exit_code=0
|
||||
printf " %-60s" "$label" >&2
|
||||
output=$(mcall "$tool" "$args" 2>&1) || exit_code=$?
|
||||
if _check_output "$output" "$exit_code"; then
|
||||
echo -e "${GREEN}PASS${NC}" >&2
|
||||
((PASS++)) || true
|
||||
else
|
||||
echo -e "${RED}FAIL${NC}" >&2
|
||||
((FAIL++)) || true
|
||||
FAILED_TESTS+=("$label")
|
||||
echo "$output" | head -3 | sed 's/^/ /' >&2
|
||||
fi
|
||||
echo "$output" # pure JSON → captured by caller's $()
|
||||
}
|
||||
|
||||
extract_id() {
|
||||
# Extract an ID from JSON output using a Python snippet.
|
||||
# Usage: ID=$(extract_id "$JSON_OUTPUT" "$LABEL" 'python expression')
|
||||
# If JSON parsing fails (malformed mcporter output), record a FAIL.
|
||||
# If parsing succeeds but finds no items, return empty (caller skips).
|
||||
local json_input="$1" label="$2" py_code="$3"
|
||||
local result="" py_exit=0 parse_err=""
|
||||
# Capture stdout (the extracted ID) and stderr (any parse errors) separately.
|
||||
# A temp file is needed because $() can only capture one stream.
|
||||
local errfile
|
||||
errfile=$(mktemp)
|
||||
result=$(echo "$json_input" | python3 -c "$py_code" 2>"$errfile") || py_exit=$?
|
||||
parse_err=$(<"$errfile")
|
||||
rm -f "$errfile"
|
||||
if [[ $py_exit -ne 0 ]]; then
|
||||
printf " %-60s${RED}FAIL${NC} (JSON parse error)\n" "$label" >&2
|
||||
[[ -n "$parse_err" ]] && echo "$parse_err" | head -2 | sed 's/^/ /' >&2
|
||||
((FAIL++)) || true
|
||||
FAILED_TESTS+=("$label (JSON parse)")
|
||||
echo ""
|
||||
return 1
|
||||
fi
|
||||
echo "$result"
|
||||
}
|
||||
|
||||
skip_test() {
|
||||
local label="$1" reason="$2"
|
||||
printf " %-60s${YELLOW}SKIP${NC} (%s)\n" "$label" "$reason"
|
||||
((SKIP++)) || true
|
||||
}
|
||||
|
||||
section() {
|
||||
echo ""
|
||||
echo -e "${CYAN}${BOLD}━━━ $1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
}
|
||||
|
||||
# ── connectivity check ────────────────────────────────────────────────────────
|
||||
|
||||
echo ""
|
||||
echo -e "${BOLD}Unraid MCP Non-Destructive Action Test Suite${NC}"
|
||||
echo -e "Server: ${CYAN}$MCP_URL${NC}"
|
||||
echo ""
|
||||
printf "Checking connectivity... "
|
||||
# Use -s (silent) without -f: a 4xx/406 means the MCP server is up and
|
||||
# responding correctly to a plain GET — only "connection refused" is fatal.
|
||||
# Capture curl's exit code directly — don't mask failures with a fallback.
|
||||
HTTP_CODE=""
|
||||
curl_exit=0
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 "$MCP_URL" 2>/dev/null) || curl_exit=$?
|
||||
if [[ $curl_exit -ne 0 ]]; then
|
||||
echo -e "${RED}UNREACHABLE${NC} (curl exit code: $curl_exit)"
|
||||
echo "Start the server first: docker compose up -d OR uv run unraid-mcp-server"
|
||||
exit 1
|
||||
fi
|
||||
echo -e "${GREEN}OK${NC} (HTTP $HTTP_CODE)"
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# PHASE 1 — Param-free read actions
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
section "unraid_info (19 query actions)"
|
||||
run_test "info: overview" unraid_info '{"action":"overview"}'
|
||||
run_test "info: array" unraid_info '{"action":"array"}'
|
||||
run_test "info: network" unraid_info '{"action":"network"}'
|
||||
run_test "info: registration" unraid_info '{"action":"registration"}'
|
||||
run_test "info: connect" unraid_info '{"action":"connect"}'
|
||||
run_test "info: variables" unraid_info '{"action":"variables"}'
|
||||
run_test "info: metrics" unraid_info '{"action":"metrics"}'
|
||||
run_test "info: services" unraid_info '{"action":"services"}'
|
||||
run_test "info: display" unraid_info '{"action":"display"}'
|
||||
run_test "info: config" unraid_info '{"action":"config"}'
|
||||
run_test "info: online" unraid_info '{"action":"online"}'
|
||||
run_test "info: owner" unraid_info '{"action":"owner"}'
|
||||
run_test "info: settings" unraid_info '{"action":"settings"}'
|
||||
run_test "info: server" unraid_info '{"action":"server"}'
|
||||
run_test "info: servers" unraid_info '{"action":"servers"}'
|
||||
run_test "info: flash" unraid_info '{"action":"flash"}'
|
||||
run_test "info: ups_devices" unraid_info '{"action":"ups_devices"}'
|
||||
run_test "info: ups_device" unraid_info '{"action":"ups_device"}'
|
||||
run_test "info: ups_config" unraid_info '{"action":"ups_config"}'
|
||||
skip_test "info: update_server" "mutation — state-changing"
|
||||
skip_test "info: update_ssh" "mutation — state-changing"
|
||||
|
||||
section "unraid_array"
|
||||
run_test "array: parity_status" unraid_array '{"action":"parity_status"}'
|
||||
skip_test "array: parity_start" "mutation — starts parity check"
|
||||
skip_test "array: parity_pause" "mutation — pauses parity check"
|
||||
skip_test "array: parity_resume" "mutation — resumes parity check"
|
||||
skip_test "array: parity_cancel" "mutation — cancels parity check"
|
||||
|
||||
section "unraid_storage (param-free reads)"
|
||||
STORAGE_DISKS=$(run_test_capture "storage: disks" unraid_storage '{"action":"disks"}')
|
||||
run_test "storage: shares" unraid_storage '{"action":"shares"}'
|
||||
run_test "storage: unassigned" unraid_storage '{"action":"unassigned"}'
|
||||
LOG_FILES=$(run_test_capture "storage: log_files" unraid_storage '{"action":"log_files"}')
|
||||
skip_test "storage: flash_backup" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_docker (param-free reads)"
|
||||
DOCKER_LIST=$(run_test_capture "docker: list" unraid_docker '{"action":"list"}')
|
||||
DOCKER_NETS=$(run_test_capture "docker: networks" unraid_docker '{"action":"networks"}')
|
||||
run_test "docker: port_conflicts" unraid_docker '{"action":"port_conflicts"}'
|
||||
run_test "docker: check_updates" unraid_docker '{"action":"check_updates"}'
|
||||
run_test "docker: sync_templates" unraid_docker '{"action":"sync_templates"}'
|
||||
run_test "docker: refresh_digests" unraid_docker '{"action":"refresh_digests"}'
|
||||
skip_test "docker: start" "mutation — changes container state"
|
||||
skip_test "docker: stop" "mutation — changes container state"
|
||||
skip_test "docker: restart" "mutation — changes container state"
|
||||
skip_test "docker: pause" "mutation — changes container state"
|
||||
skip_test "docker: unpause" "mutation — changes container state"
|
||||
skip_test "docker: update" "mutation — updates container image"
|
||||
skip_test "docker: remove" "destructive (confirm=True required)"
|
||||
skip_test "docker: update_all" "destructive (confirm=True required)"
|
||||
skip_test "docker: create_folder" "mutation — changes organizer state"
|
||||
skip_test "docker: set_folder_children" "mutation — changes organizer state"
|
||||
skip_test "docker: delete_entries" "destructive (confirm=True required)"
|
||||
skip_test "docker: move_to_folder" "mutation — changes organizer state"
|
||||
skip_test "docker: move_to_position" "mutation — changes organizer state"
|
||||
skip_test "docker: rename_folder" "mutation — changes organizer state"
|
||||
skip_test "docker: create_folder_with_items" "mutation — changes organizer state"
|
||||
skip_test "docker: update_view_prefs" "mutation — changes organizer state"
|
||||
skip_test "docker: reset_template_mappings" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_vm (param-free reads)"
|
||||
VM_LIST=$(run_test_capture "vm: list" unraid_vm '{"action":"list"}')
|
||||
skip_test "vm: start" "mutation — changes VM state"
|
||||
skip_test "vm: stop" "mutation — changes VM state"
|
||||
skip_test "vm: pause" "mutation — changes VM state"
|
||||
skip_test "vm: resume" "mutation — changes VM state"
|
||||
skip_test "vm: reboot" "mutation — changes VM state"
|
||||
skip_test "vm: force_stop" "destructive (confirm=True required)"
|
||||
skip_test "vm: reset" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_notifications"
|
||||
run_test "notifications: overview" unraid_notifications '{"action":"overview"}'
|
||||
run_test "notifications: list" unraid_notifications '{"action":"list"}'
|
||||
run_test "notifications: warnings" unraid_notifications '{"action":"warnings"}'
|
||||
run_test "notifications: recalculate" unraid_notifications '{"action":"recalculate"}'
|
||||
skip_test "notifications: create" "mutation — creates notification"
|
||||
skip_test "notifications: create_unique" "mutation — creates notification"
|
||||
skip_test "notifications: archive" "mutation — changes notification state"
|
||||
skip_test "notifications: unread" "mutation — changes notification state"
|
||||
skip_test "notifications: archive_all" "mutation — changes notification state"
|
||||
skip_test "notifications: archive_many" "mutation — changes notification state"
|
||||
skip_test "notifications: unarchive_many" "mutation — changes notification state"
|
||||
skip_test "notifications: unarchive_all" "mutation — changes notification state"
|
||||
skip_test "notifications: delete" "destructive (confirm=True required)"
|
||||
skip_test "notifications: delete_archived" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_rclone"
|
||||
run_test "rclone: list_remotes" unraid_rclone '{"action":"list_remotes"}'
|
||||
run_test "rclone: config_form" unraid_rclone '{"action":"config_form"}'
|
||||
skip_test "rclone: create_remote" "mutation — creates remote"
|
||||
skip_test "rclone: delete_remote" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_users"
|
||||
run_test "users: me" unraid_users '{"action":"me"}'
|
||||
|
||||
section "unraid_keys"
|
||||
KEYS_LIST=$(run_test_capture "keys: list" unraid_keys '{"action":"list"}')
|
||||
skip_test "keys: create" "mutation — creates API key"
|
||||
skip_test "keys: update" "mutation — modifies API key"
|
||||
skip_test "keys: delete" "destructive (confirm=True required)"
|
||||
|
||||
section "unraid_health"
|
||||
run_test "health: check" unraid_health '{"action":"check"}'
|
||||
run_test "health: test_connection" unraid_health '{"action":"test_connection"}'
|
||||
run_test "health: diagnose" unraid_health '{"action":"diagnose"}'
|
||||
|
||||
section "unraid_settings (all mutations — skipped)"
|
||||
skip_test "settings: update" "mutation — modifies settings"
|
||||
skip_test "settings: update_temperature" "mutation — modifies settings"
|
||||
skip_test "settings: update_time" "mutation — modifies settings"
|
||||
skip_test "settings: configure_ups" "destructive (confirm=True required)"
|
||||
skip_test "settings: update_api" "mutation — modifies settings"
|
||||
skip_test "settings: connect_sign_in" "mutation — authentication action"
|
||||
skip_test "settings: connect_sign_out" "mutation — authentication action"
|
||||
skip_test "settings: setup_remote_access" "destructive (confirm=True required)"
|
||||
skip_test "settings: enable_dynamic_remote_access" "destructive (confirm=True required)"
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# PHASE 2 — ID-discovered read actions
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
section "Phase 2: ID-discovered reads"
|
||||
|
||||
# ── docker container ID ───────────────────────────────────────────────────────
|
||||
CONTAINER_ID=$(extract_id "$DOCKER_LIST" "docker: extract container ID" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
containers = d.get('containers') or d.get('data', {}).get('containers') or []
|
||||
if isinstance(containers, list) and containers:
|
||||
c = containers[0]
|
||||
cid = c.get('id') or c.get('names', [''])[0].lstrip('/')
|
||||
if cid:
|
||||
print(cid)
|
||||
")
|
||||
|
||||
if [[ -n "$CONTAINER_ID" ]]; then
|
||||
run_test "docker: details (id=$CONTAINER_ID)" \
|
||||
unraid_docker "{\"action\":\"details\",\"container_id\":\"$CONTAINER_ID\"}"
|
||||
run_test "docker: logs (id=$CONTAINER_ID)" \
|
||||
unraid_docker "{\"action\":\"logs\",\"container_id\":\"$CONTAINER_ID\",\"tail_lines\":20}"
|
||||
else
|
||||
skip_test "docker: details" "no containers found to discover ID"
|
||||
skip_test "docker: logs" "no containers found to discover ID"
|
||||
fi
|
||||
|
||||
# ── docker network ID ─────────────────────────────────────────────────────────
|
||||
NETWORK_ID=$(extract_id "$DOCKER_NETS" "docker: extract network ID" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
nets = d.get('networks') or d.get('data', {}).get('networks') or []
|
||||
if isinstance(nets, list) and nets:
|
||||
nid = nets[0].get('id') or nets[0].get('Id')
|
||||
if nid:
|
||||
print(nid)
|
||||
")
|
||||
|
||||
if [[ -n "$NETWORK_ID" ]]; then
|
||||
run_test "docker: network_details (id=$NETWORK_ID)" \
|
||||
unraid_docker "{\"action\":\"network_details\",\"network_id\":\"$NETWORK_ID\"}"
|
||||
else
|
||||
skip_test "docker: network_details" "no networks found to discover ID"
|
||||
fi
|
||||
|
||||
# ── disk ID ───────────────────────────────────────────────────────────────────
|
||||
DISK_ID=$(extract_id "$STORAGE_DISKS" "storage: extract disk ID" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
disks = d.get('disks') or d.get('data', {}).get('disks') or []
|
||||
if isinstance(disks, list) and disks:
|
||||
did = disks[0].get('id') or disks[0].get('device')
|
||||
if did:
|
||||
print(did)
|
||||
")
|
||||
|
||||
if [[ -n "$DISK_ID" ]]; then
|
||||
run_test "storage: disk_details (id=$DISK_ID)" \
|
||||
unraid_storage "{\"action\":\"disk_details\",\"disk_id\":\"$DISK_ID\"}"
|
||||
else
|
||||
skip_test "storage: disk_details" "no disks found to discover ID"
|
||||
fi
|
||||
|
||||
# ── log path ──────────────────────────────────────────────────────────────────
|
||||
LOG_PATH=$(extract_id "$LOG_FILES" "storage: extract log path" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
files = d.get('log_files') or d.get('files') or d.get('data', {}).get('log_files') or []
|
||||
if isinstance(files, list) and files:
|
||||
p = files[0].get('path') or (files[0] if isinstance(files[0], str) else None)
|
||||
if p:
|
||||
print(p)
|
||||
")
|
||||
|
||||
if [[ -n "$LOG_PATH" ]]; then
|
||||
run_test "storage: logs (path=$LOG_PATH)" \
|
||||
unraid_storage "{\"action\":\"logs\",\"log_path\":\"$LOG_PATH\",\"tail_lines\":20}"
|
||||
else
|
||||
skip_test "storage: logs" "no log files found to discover path"
|
||||
fi
|
||||
|
||||
# ── VM ID ─────────────────────────────────────────────────────────────────────
|
||||
VM_ID=$(extract_id "$VM_LIST" "vm: extract VM ID" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
vms = d.get('vms') or d.get('data', {}).get('vms') or []
|
||||
if isinstance(vms, list) and vms:
|
||||
vid = vms[0].get('uuid') or vms[0].get('id') or vms[0].get('name')
|
||||
if vid:
|
||||
print(vid)
|
||||
")
|
||||
|
||||
if [[ -n "$VM_ID" ]]; then
|
||||
run_test "vm: details (id=$VM_ID)" \
|
||||
unraid_vm "{\"action\":\"details\",\"vm_id\":\"$VM_ID\"}"
|
||||
else
|
||||
skip_test "vm: details" "no VMs found to discover ID"
|
||||
fi
|
||||
|
||||
# ── API key ID ────────────────────────────────────────────────────────────────
|
||||
KEY_ID=$(extract_id "$KEYS_LIST" "keys: extract key ID" "
|
||||
import json, sys
|
||||
d = json.load(sys.stdin)
|
||||
keys = d.get('keys') or d.get('apiKeys') or d.get('data', {}).get('keys') or []
|
||||
if isinstance(keys, list) and keys:
|
||||
kid = keys[0].get('id')
|
||||
if kid:
|
||||
print(kid)
|
||||
")
|
||||
|
||||
if [[ -n "$KEY_ID" ]]; then
|
||||
run_test "keys: get (id=$KEY_ID)" \
|
||||
unraid_keys "{\"action\":\"get\",\"key_id\":\"$KEY_ID\"}"
|
||||
else
|
||||
skip_test "keys: get" "no API keys found to discover ID"
|
||||
fi
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
# SUMMARY
|
||||
# ═══════════════════════════════════════════════════════════════════════════════
|
||||
|
||||
TOTAL=$((PASS + FAIL + SKIP))
|
||||
echo ""
|
||||
echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
echo -e "${BOLD}Results: ${GREEN}${PASS} passed${NC} ${RED}${FAIL} failed${NC} ${YELLOW}${SKIP} skipped${NC} (${TOTAL} total)"
|
||||
|
||||
if [[ ${#FAILED_TESTS[@]} -gt 0 ]]; then
|
||||
echo ""
|
||||
echo -e "${RED}${BOLD}Failed tests:${NC}"
|
||||
for t in "${FAILED_TESTS[@]}"; do
|
||||
echo -e " ${RED}✗${NC} $t"
|
||||
done
|
||||
fi
|
||||
|
||||
echo ""
|
||||
[[ $FAIL -eq 0 ]] && exit 0 || exit 1
|
||||
338
tests/mcporter/test-destructive.sh
Executable file
338
tests/mcporter/test-destructive.sh
Executable file
@@ -0,0 +1,338 @@
|
||||
#!/usr/bin/env bash
|
||||
# test-destructive.sh — Safe destructive action tests for unraid-mcp
|
||||
#
|
||||
# Tests all 15 destructive actions using create→destroy and no-op patterns.
|
||||
# Actions with global blast radius (no safe isolation) are skipped.
|
||||
#
|
||||
# Transport: stdio — spawns uv run unraid-mcp-server per call; no running server needed.
|
||||
#
|
||||
# Usage:
|
||||
# ./tests/mcporter/test-destructive.sh [--confirm]
|
||||
#
|
||||
# Options:
|
||||
# --confirm REQUIRED to execute destructive tests; without it, dry-runs only
|
||||
#
|
||||
# Exit codes:
|
||||
# 0 — all executable tests passed (or dry-run)
|
||||
# 1 — one or more tests failed
|
||||
# 2 — prerequisite check failed
|
||||
|
||||
set -uo pipefail
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
readonly SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
|
||||
readonly SCRIPT_NAME="$(basename -- "${BASH_SOURCE[0]}")"
|
||||
|
||||
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
|
||||
CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m'
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Defaults
|
||||
# ---------------------------------------------------------------------------
|
||||
readonly PROJECT_DIR="$(cd -- "${SCRIPT_DIR}/../.." && pwd -P)"
|
||||
CONFIRM=false
|
||||
|
||||
PASS=0; FAIL=0; SKIP=0
|
||||
declare -a FAILED_TESTS=()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Argument parsing
|
||||
# ---------------------------------------------------------------------------
|
||||
# Walk the CLI arguments; only --confirm and -h/--help are recognised.
while (( $# > 0 )); do
    case "$1" in
        --confirm)
            CONFIRM=true
            shift
            ;;
        -h|--help)
            printf 'Usage: %s [--confirm]\n' "${SCRIPT_NAME}"
            exit 0
            ;;
        *)
            printf '[ERROR] Unknown argument: %s\n' "$1" >&2
            exit 2
            ;;
    esac
done
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
# Print a blank line then a coloured banner naming the test group in $1.
section() {
    echo ""
    echo -e "${CYAN}${BOLD}━━━ $1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
}
|
||||
|
||||
# Record a passing test: green PASS tag next to the label, PASS counter bumped.
pass_test() {
    local name="$1"
    printf " %-60s${GREEN}PASS${NC}\n" "${name}"
    ((PASS++)) || true  # (( )) exits non-zero when the pre-increment value is 0
}
|
||||
|
||||
# Record a failing test: red FAIL tag, the reason on its own line, and the
# label appended to FAILED_TESTS for the end-of-run summary.
fail_test() {
    local name="$1"
    local why="$2"
    printf " %-60s${RED}FAIL${NC}\n" "${name}"
    printf " %s\n" "${why}"
    ((FAIL++)) || true
    FAILED_TESTS+=("${name}")
}
|
||||
|
||||
# Record a skipped test with its reason shown in parentheses.
skip_test() {
    local name="$1" why="$2"
    printf " %-60s${YELLOW}SKIP${NC} (%s)\n" "${name}" "${why}"
    ((SKIP++)) || true
}
|
||||
|
||||
# Report a test that would only execute under --confirm; counted as a skip.
dry_run() {
    local name="$1"
    printf " %-60s${CYAN}DRY-RUN${NC}\n" "${name}"
    ((SKIP++)) || true
}
|
||||
|
||||
# mcall <tool> <args_json>
# Invoke one MCP tool via mcporter over stdio (server spawned per call with
# uv). JSON result goes to stdout; stderr is suppressed.
mcall() {
    local tool="$1" args="$2"
    local -a invocation=(
        mcporter call
        --stdio "uv run --project ${PROJECT_DIR} unraid-mcp-server"
        --tool "$tool"
        --args "$args"
        --output json
    )
    "${invocation[@]}" 2>/dev/null
}
|
||||
|
||||
# extract <json> <python-expression>
# Parse <json> and print the result of evaluating <python-expression> against
# the parsed document (bound to `d`). The JSON is fed to Python via stdin
# rather than interpolated into the source inside ''' quotes — interpolation
# breaks (and is injectable) when the payload contains quotes or backslashes.
# Errors are swallowed: on any failure the function prints nothing and
# succeeds, so callers can test for an empty result.
extract() {
    printf '%s' "$1" | python3 -c "import json,sys; d=json.load(sys.stdin); print($2)" 2>/dev/null || true
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Connectivity check
|
||||
# ---------------------------------------------------------------------------
|
||||
echo ""
|
||||
echo -e "${BOLD}Unraid MCP Destructive Action Test Suite${NC}"
|
||||
echo -e "Transport: ${CYAN}stdio (uv run unraid-mcp-server)${NC}"
|
||||
echo -e "Mode: $(${CONFIRM} && echo "${RED}LIVE — destructive actions will execute${NC}" || echo "${YELLOW}DRY-RUN — pass --confirm to execute${NC}")"
|
||||
echo ""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# docker: remove — skipped (two-machine problem)
|
||||
# ---------------------------------------------------------------------------
|
||||
section "docker: remove"
|
||||
skip_test "docker: remove" "requires a pre-existing stopped container on the Unraid server — can't provision via local docker"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# docker: delete_entries — create folder → delete via MCP
|
||||
# ---------------------------------------------------------------------------
|
||||
section "docker: delete_entries"
|
||||
skip_test "docker: delete_entries" "createDockerFolder mutation not available in this Unraid API version (HTTP 400)"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# docker: update_all — mock/safety audit only
|
||||
# ---------------------------------------------------------------------------
|
||||
section "docker: update_all"
|
||||
skip_test "docker: update_all" "global blast radius — restarts all containers; safety audit only"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# docker: reset_template_mappings — mock/safety audit only
|
||||
# ---------------------------------------------------------------------------
|
||||
section "docker: reset_template_mappings"
|
||||
skip_test "docker: reset_template_mappings" "wipes all template mappings globally; safety audit only"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# vm: force_stop — requires manual test VM setup
|
||||
# ---------------------------------------------------------------------------
|
||||
section "vm: force_stop"
|
||||
skip_test "vm: force_stop" "requires pre-created Alpine test VM (no persistent disk)"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# vm: reset — requires manual test VM setup
|
||||
# ---------------------------------------------------------------------------
|
||||
section "vm: reset"
|
||||
skip_test "vm: reset" "requires pre-created Alpine test VM (no persistent disk)"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# notifications: delete — create notification → delete via MCP
|
||||
# ---------------------------------------------------------------------------
|
||||
section "notifications: delete"
|
||||
|
||||
# Create a throwaway notification, locate it by title in the UNREAD list,
# delete it via the MCP delete action, and verify the delete succeeded.
# On a failed delete the notification is archived as fallback cleanup.
# All JSON responses are piped to python3 via stdin — interpolating them
# into the Python source inside ''' quotes breaks on quotes/backslashes.
test_notifications_delete() {
    local label="notifications: delete"

    # Create the notification
    local create_raw
    create_raw="$(mcall unraid_notifications \
        '{"action":"create","title":"mcp-test-delete","subject":"MCP destructive test","description":"Safe to delete","importance":"INFO"}')"
    local create_ok
    create_ok="$(printf '%s' "${create_raw}" | python3 -c "import json,sys; d=json.load(sys.stdin); print(d.get('success', False))" 2>/dev/null)"
    if [[ "${create_ok}" != "True" ]]; then
        fail_test "${label}" "create notification failed: ${create_raw}"
        return
    fi

    # The create response ID doesn't match the stored filename — list and find by title.
    # Use the LAST match so a stale notification with the same title is bypassed.
    local list_raw nid
    list_raw="$(mcall unraid_notifications '{"action":"list","notification_type":"UNREAD"}')"
    nid="$(printf '%s' "${list_raw}" | python3 -c "
import json,sys
d = json.load(sys.stdin)
notifs = d.get('notifications', [])
# Reverse so the most-recent match wins over any stale leftover
matches = [n['id'] for n in reversed(notifs) if n.get('title') == 'mcp-test-delete']
print(matches[0] if matches else '')
" 2>/dev/null)"

    if [[ -z "${nid}" ]]; then
        fail_test "${label}" "created notification not found in UNREAD list"
        return
    fi

    local del_raw
    del_raw="$(mcall unraid_notifications \
        "{\"action\":\"delete\",\"notification_id\":\"${nid}\",\"notification_type\":\"UNREAD\",\"confirm\":true}")"
    # success=true OR deleteNotification key present (raw GraphQL response) both indicate success
    local success
    success="$(printf '%s' "${del_raw}" | python3 -c "
import json,sys
d = json.load(sys.stdin)
ok = d.get('success', False) or ('deleteNotification' in d)
print(ok)
" 2>/dev/null)"

    if [[ "${success}" != "True" ]]; then
        # Leak: notification created but not deleted — archive it so it doesn't clutter the feed
        mcall unraid_notifications "{\"action\":\"archive\",\"notification_id\":\"${nid}\"}" &>/dev/null || true
        fail_test "${label}" "delete did not return success=true: ${del_raw} (notification archived as fallback cleanup)"
        return
    fi

    pass_test "${label}"
}
|
||||
|
||||
if ${CONFIRM}; then
|
||||
test_notifications_delete
|
||||
else
|
||||
dry_run "notifications: delete [create notification → mcall unraid_notifications delete]"
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# notifications: delete_archived — bulk wipe; skip (hard to isolate)
|
||||
# ---------------------------------------------------------------------------
|
||||
section "notifications: delete_archived"
|
||||
skip_test "notifications: delete_archived" "bulk wipe of ALL archived notifications; run manually on shart if needed"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# rclone: delete_remote — create local:/tmp remote → delete via MCP
|
||||
# ---------------------------------------------------------------------------
|
||||
section "rclone: delete_remote"
|
||||
skip_test "rclone: delete_remote" "createRCloneRemote broken server-side on this Unraid version (url slash error)"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# keys: delete — create test key → delete via MCP
|
||||
# ---------------------------------------------------------------------------
|
||||
section "keys: delete"
|
||||
|
||||
# Create a VIEWER-role test API key, delete it via the MCP delete action,
# then verify it no longer appears in the key list. Refuses to run when a
# key named 'mcp test key' already exists (never delete a real key).
# JSON responses are piped to python3 via stdin, and the discovered key id
# is passed as argv — never interpolated into the Python source, where
# quotes/backslashes would break parsing (or allow injection).
test_keys_delete() {
    local label="keys: delete"

    # Guard: abort if test key already exists (don't delete a real key)
    # Note: API key names cannot contain hyphens — use "mcp test key"
    local existing_keys
    existing_keys="$(mcall unraid_keys '{"action":"list"}')"
    if printf '%s' "${existing_keys}" | python3 -c "
import json,sys
d = json.load(sys.stdin)
keys = d.get('keys', d.get('apiKeys', []))
sys.exit(1 if any(k.get('name') == 'mcp test key' for k in keys) else 0)
" 2>/dev/null; then
        : # not found, safe to proceed
    else
        fail_test "${label}" "a key named 'mcp test key' already exists — refusing to proceed"
        return
    fi

    local create_raw
    create_raw="$(mcall unraid_keys \
        '{"action":"create","name":"mcp test key","roles":["VIEWER"]}')"
    local kid
    kid="$(printf '%s' "${create_raw}" | python3 -c "import json,sys; d=json.load(sys.stdin); print(d.get('key',{}).get('id',''))" 2>/dev/null)"

    if [[ -z "${kid}" ]]; then
        fail_test "${label}" "create key did not return an ID"
        return
    fi

    local del_raw
    del_raw="$(mcall unraid_keys "{\"action\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}")"
    local success
    success="$(printf '%s' "${del_raw}" | python3 -c "import json,sys; d=json.load(sys.stdin); print(d.get('success', False))" 2>/dev/null)"

    if [[ "${success}" != "True" ]]; then
        # Cleanup: attempt to delete the leaked key so future runs are not blocked
        mcall unraid_keys "{\"action\":\"delete\",\"key_id\":\"${kid}\",\"confirm\":true}" &>/dev/null || true
        fail_test "${label}" "delete did not return success=true: ${del_raw} (key delete re-attempted as fallback cleanup)"
        return
    fi

    # Verify gone — the key id arrives via sys.argv[1], not source interpolation
    local list_raw
    list_raw="$(mcall unraid_keys '{"action":"list"}')"
    if printf '%s' "${list_raw}" | python3 -c "
import json,sys
d = json.load(sys.stdin)
keys = d.get('keys', d.get('apiKeys', []))
sys.exit(0 if not any(k.get('id') == sys.argv[1] for k in keys) else 1)
" "${kid}" 2>/dev/null; then
        pass_test "${label}"
    else
        fail_test "${label}" "key still present in list after delete"
    fi
}
|
||||
|
||||
if ${CONFIRM}; then
|
||||
test_keys_delete
|
||||
else
|
||||
dry_run "keys: delete [create test key → mcall unraid_keys delete]"
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# storage: flash_backup — requires dedicated test remote
|
||||
# ---------------------------------------------------------------------------
|
||||
section "storage: flash_backup"
|
||||
skip_test "storage: flash_backup" "requires dedicated test remote pre-configured and isolated destination"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# settings: configure_ups — mock/safety audit only
|
||||
# ---------------------------------------------------------------------------
|
||||
section "settings: configure_ups"
|
||||
skip_test "settings: configure_ups" "wrong config breaks UPS monitoring; safety audit only"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# settings: setup_remote_access — mock/safety audit only
|
||||
# ---------------------------------------------------------------------------
|
||||
section "settings: setup_remote_access"
|
||||
skip_test "settings: setup_remote_access" "misconfiguration can lock out remote access; safety audit only"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# settings: enable_dynamic_remote_access — shart only, toggle false → restore
|
||||
# ---------------------------------------------------------------------------
|
||||
section "settings: enable_dynamic_remote_access"
|
||||
skip_test "settings: enable_dynamic_remote_access" "run manually on shart (10.1.0.3) only — see docs/DESTRUCTIVE_ACTIONS.md"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# info: update_ssh — read current values, re-apply same (no-op)
|
||||
# ---------------------------------------------------------------------------
|
||||
section "info: update_ssh"
|
||||
skip_test "info: update_ssh" "updateSshSettings mutation not available in this Unraid API version (HTTP 400)"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Summary
|
||||
# ---------------------------------------------------------------------------
|
||||
TOTAL=$((PASS + FAIL + SKIP))
|
||||
echo ""
|
||||
echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
|
||||
echo -e "${BOLD}Results: ${GREEN}${PASS} passed${NC} ${RED}${FAIL} failed${NC} ${YELLOW}${SKIP} skipped${NC} (${TOTAL} total)"
|
||||
|
||||
if [[ ${#FAILED_TESTS[@]} -gt 0 ]]; then
|
||||
echo ""
|
||||
echo -e "${RED}${BOLD}Failed tests:${NC}"
|
||||
for t in "${FAILED_TESTS[@]}"; do
|
||||
echo -e " ${RED}✗${NC} ${t}"
|
||||
done
|
||||
fi
|
||||
|
||||
echo ""
|
||||
if ! ${CONFIRM}; then
|
||||
echo -e "${YELLOW}Dry-run complete. Pass --confirm to execute destructive tests.${NC}"
|
||||
fi
|
||||
|
||||
[[ ${FAIL} -eq 0 ]] && exit 0 || exit 1
|
||||
781
tests/mcporter/test-tools.sh
Executable file
781
tests/mcporter/test-tools.sh
Executable file
@@ -0,0 +1,781 @@
|
||||
#!/usr/bin/env bash
|
||||
# =============================================================================
|
||||
# test-tools.sh — Integration smoke-test for unraid-mcp MCP server tools
|
||||
#
|
||||
# Exercises broad non-destructive smoke coverage of the consolidated `unraid` tool
|
||||
# (action + subaction pattern). The server is launched ad-hoc via mcporter's
|
||||
# --stdio flag so no persistent process or registered server entry is required.
|
||||
#
|
||||
# Usage:
|
||||
# ./tests/mcporter/test-tools.sh [--timeout-ms N] [--parallel] [--verbose]
|
||||
#
|
||||
# Options:
|
||||
# --timeout-ms N Per-call timeout in milliseconds (default: 25000)
|
||||
# --parallel Run independent test groups in parallel (default: off)
|
||||
# --verbose Print raw mcporter output for each call
|
||||
#
|
||||
# Exit codes:
|
||||
# 0 — all tests passed or skipped
|
||||
# 1 — one or more tests failed
|
||||
# 2 — prerequisite check failed (mcporter, uv, server startup)
|
||||
# =============================================================================
|
||||
|
||||
set -uo pipefail
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
readonly SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
|
||||
readonly PROJECT_DIR="$(cd -- "${SCRIPT_DIR}/../.." && pwd -P)"
|
||||
readonly SCRIPT_NAME="$(basename -- "${BASH_SOURCE[0]}")"
|
||||
readonly TS_START="$(date +%s%N)" # nanosecond epoch
|
||||
readonly LOG_FILE="${TMPDIR:-/tmp}/${SCRIPT_NAME%.sh}.$(date +%Y%m%d-%H%M%S).log"
|
||||
|
||||
# Colours (disabled automatically when stdout is not a terminal)
|
||||
if [[ -t 1 ]]; then
|
||||
C_RESET='\033[0m'
|
||||
C_BOLD='\033[1m'
|
||||
C_GREEN='\033[0;32m'
|
||||
C_RED='\033[0;31m'
|
||||
C_YELLOW='\033[0;33m'
|
||||
C_CYAN='\033[0;36m'
|
||||
C_DIM='\033[2m'
|
||||
else
|
||||
C_RESET='' C_BOLD='' C_GREEN='' C_RED='' C_YELLOW='' C_CYAN='' C_DIM=''
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Defaults (overridable via flags)
|
||||
# ---------------------------------------------------------------------------
|
||||
CALL_TIMEOUT_MS=25000
|
||||
USE_PARALLEL=false
|
||||
VERBOSE=false
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Counters (updated by run_test / skip_test)
|
||||
# ---------------------------------------------------------------------------
|
||||
PASS_COUNT=0
|
||||
FAIL_COUNT=0
|
||||
SKIP_COUNT=0
|
||||
declare -a FAIL_NAMES=()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Argument parsing
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parse CLI flags into the mutable defaults above.
# Recognised: --timeout-ms N, --parallel, --verbose, -h/--help.
# Unknown arguments abort with exit code 2.
parse_args() {
    while (( $# > 0 )); do
        local flag="$1"
        case "${flag}" in
            --timeout-ms)
                CALL_TIMEOUT_MS="${2:?--timeout-ms requires a value}"
                shift 2
                ;;
            --parallel)
                USE_PARALLEL=true
                shift
                ;;
            --verbose)
                VERBOSE=true
                shift
                ;;
            -h|--help)
                printf 'Usage: %s [--timeout-ms N] [--parallel] [--verbose]\n' "${SCRIPT_NAME}"
                exit 0
                ;;
            *)
                printf '[ERROR] Unknown argument: %s\n' "${flag}" >&2
                exit 2
                ;;
        esac
    done
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Logging helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
# Coloured log helpers — each prints a tagged message and also appends it to
# the per-run LOG_FILE via tee; log_error additionally redirects to stderr.
log_info() { printf "${C_CYAN}[INFO]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}"; }
log_warn() { printf "${C_YELLOW}[WARN]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}"; }
log_error() { printf "${C_RED}[ERROR]${C_RESET} %s\n" "$*" | tee -a "${LOG_FILE}" >&2; }
|
||||
|
||||
# Print whole milliseconds elapsed since TS_START (both nanosecond epochs).
elapsed_ms() {
    local current_ns
    current_ns="$(date +%s%N)"
    printf '%d' "$(( (current_ns - TS_START) / 1000000 ))"
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cleanup trap
|
||||
# ---------------------------------------------------------------------------
|
||||
# EXIT trap: if the script is leaving with a non-zero status, point the user
# at the run log so the failure context isn't lost.
cleanup() {
    local rc=$?
    (( rc == 0 )) || log_warn "Script exited with rc=${rc}. Log: ${LOG_FILE}"
}
trap cleanup EXIT
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Prerequisite checks
|
||||
# ---------------------------------------------------------------------------
|
||||
# Verify the external tools and project layout this suite depends on.
# Logs one error per missing prerequisite and returns 2 if anything is absent;
# returns 0 (implicitly) when everything is in place.
check_prerequisites() {
    local all_present=true

    command -v mcporter &>/dev/null || {
        log_error "mcporter not found in PATH. Install it and re-run."
        all_present=false
    }

    command -v uv &>/dev/null || {
        log_error "uv not found in PATH. Install it and re-run."
        all_present=false
    }

    command -v python3 &>/dev/null || {
        log_error "python3 not found in PATH."
        all_present=false
    }

    [[ -f "${PROJECT_DIR}/pyproject.toml" ]] || {
        log_error "pyproject.toml not found at ${PROJECT_DIR}. Wrong directory?"
        all_present=false
    }

    [[ "${all_present}" == true ]] || return 2
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Server startup smoke-test
|
||||
# Launches the stdio server and calls unraid action=health subaction=check.
|
||||
# Returns 0 if the server responds, non-zero on import failure.
|
||||
# ---------------------------------------------------------------------------
|
||||
# ---------------------------------------------------------------------------
# Server startup smoke-test.
# Spawns the stdio server once via mcporter and calls the consolidated
# `unraid` tool with action=health subaction=check. Returns 0 when a
# health-shaped JSON response comes back; returns 2 when the server fails to
# start or the response shape is unrecognised (diagnostics printed either way).
# ---------------------------------------------------------------------------
smoke_test_server() {
  log_info "Smoke-testing server startup..."

  local output
  # Capture stdout+stderr together; '|| true' because a startup failure is
  # diagnosed from the output below, not from mcporter's exit code.
  output="$(
    mcporter call \
      --stdio "uv run unraid-mcp-server" \
      --cwd "${PROJECT_DIR}" \
      --name "unraid-smoke" \
      --tool unraid \
      --args '{"action":"health","subaction":"check"}' \
      --timeout 30000 \
      --output json \
      2>&1
  )" || true

  # mcporter reports an unreachable stdio server as kind=offline in its JSON.
  if printf '%s' "${output}" | grep -q '"kind": "offline"'; then
    log_error "Server failed to start. Output:"
    printf '%s\n' "${output}" >&2
    log_error "Common causes:"
    log_error " • Missing module: check 'uv run unraid-mcp-server' locally"
    log_error " • server.py has an import for a file that doesn't exist yet"
    log_error " • Environment variable UNRAID_API_URL or UNRAID_API_KEY missing"
    return 2
  fi

  # Shape check: a health response is expected to carry at least one of the
  # status/success/error keys; anything else (including unparseable output)
  # is reported verbatim via key_check.
  local key_check
  key_check="$(
    printf '%s' "${output}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    if 'status' in d or 'success' in d or 'error' in d:
        print('ok')
    else:
        print('missing: no status/success/error key in response')
except Exception as e:
    print('parse_error: ' + str(e))
" 2>/dev/null
  )" || key_check="parse_error"

  if [[ "${key_check}" != "ok" ]]; then
    log_error "Smoke test: unexpected response shape — ${key_check}"
    printf '%s\n' "${output}" >&2
    return 2
  fi

  log_info "Server started successfully (health response received)."
  return 0
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# mcporter call wrapper
|
||||
# Usage: mcporter_call <args_json>
|
||||
# All calls go to the single `unraid` tool.
|
||||
# ---------------------------------------------------------------------------
|
||||
mcporter_call() {
    # Invoke the single `unraid` tool over a transient stdio server.
    #   $1: JSON string forwarded verbatim as the tool arguments (required).
    # stdout: mcporter's JSON output, with stderr folded in.
    # Reads: PROJECT_DIR, CALL_TIMEOUT_MS.
    local args_json="${1:?args_json required}"

    # Assemble the invocation as an array so the flag list stays readable
    # and word-splitting is never an issue.
    local -a cmd=(
        mcporter call
        --stdio "uv run unraid-mcp-server"
        --cwd "${PROJECT_DIR}"
        --name "unraid"
        --tool unraid
        --args "${args_json}"
        --timeout "${CALL_TIMEOUT_MS}"
        --output json
    )
    "${cmd[@]}" 2>&1
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test runner
|
||||
# Usage: run_test <label> <args_json> [expected_key]
|
||||
# ---------------------------------------------------------------------------
|
||||
run_test() {
    # Execute one tool call and record PASS/FAIL with timing.
    #   $1 label          human-readable test name (required)
    #   $2 args           JSON payload passed to mcporter_call (required)
    #   $3 expected_key   optional dotted key path that must exist in the reply
    # Side effects: appends output to LOG_FILE; mutates PASS_COUNT,
    # FAIL_COUNT and FAIL_NAMES.  Returns 0 on pass, 1 on fail.
    local label="${1:?label required}"
    local args="${2:?args required}"
    local expected_key="${3:-}"

    local t0
    t0="$(date +%s%N)"

    local output
    # `|| true`: a failing call must not abort the whole run; the failure is
    # classified below from the captured output.
    output="$(mcporter_call "${args}" 2>&1)" || true

    local elapsed_ms
    elapsed_ms="$(( ( $(date +%s%N) - t0 ) / 1000000 ))"

    if [[ "${VERBOSE}" == true ]]; then
        printf '%s\n' "${output}" | tee -a "${LOG_FILE}"
    else
        printf '%s\n' "${output}" >> "${LOG_FILE}"
    fi

    # Detect server-offline (import/startup failure)
    if printf '%s' "${output}" | grep -q '"kind": "offline"'; then
        printf "${C_RED}[FAIL]${C_RESET} %-55s ${C_DIM}%dms${C_RESET}\n" \
            "${label}" "${elapsed_ms}" | tee -a "${LOG_FILE}"
        printf ' server offline — check startup errors in %s\n' "${LOG_FILE}" | tee -a "${LOG_FILE}"
        FAIL_COUNT=$(( FAIL_COUNT + 1 ))
        FAIL_NAMES+=("${label}")
        return 1
    fi

    # Validate optional key presence.
    # FIX: expected_key is passed to python3 via argv (sys.argv[1]) instead of
    # being interpolated into the python source.  Previously a key containing
    # a quote or backslash produced a python SyntaxError (misreported as a
    # parse error) and was a code-injection vector.
    if [[ -n "${expected_key}" ]]; then
        local key_check
        key_check="$(
            printf '%s' "${output}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    node = d
    for k in sys.argv[1].split('.'):
        if k:
            node = node[k]
    print('ok')
except Exception as e:
    print('missing: ' + str(e))
" "${expected_key}" 2>/dev/null
        )" || key_check="parse_error"

        if [[ "${key_check}" != "ok" ]]; then
            printf "${C_RED}[FAIL]${C_RESET} %-55s ${C_DIM}%dms${C_RESET}\n" \
                "${label}" "${elapsed_ms}" | tee -a "${LOG_FILE}"
            printf ' expected key .%s not found: %s\n' "${expected_key}" "${key_check}" | tee -a "${LOG_FILE}"
            FAIL_COUNT=$(( FAIL_COUNT + 1 ))
            FAIL_NAMES+=("${label}")
            return 1
        fi
    fi

    printf "${C_GREEN}[PASS]${C_RESET} %-55s ${C_DIM}%dms${C_RESET}\n" \
        "${label}" "${elapsed_ms}" | tee -a "${LOG_FILE}"
    PASS_COUNT=$(( PASS_COUNT + 1 ))
    return 0
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Skip helper
|
||||
# ---------------------------------------------------------------------------
|
||||
skip_test() {
    # Record a skipped test with its reason; only SKIP_COUNT is touched.
    #   $1 label   test name (required)
    #   $2 reason  why it was skipped (defaults to a generic message)
    local label="${1:?label required}"
    local reason="${2:-prerequisite returned empty}"
    local line
    # Build the rendered line once, then emit it to console and LOG_FILE.
    line="$(printf "${C_YELLOW}[SKIP]${C_RESET} %-55s %s" "${label}" "${reason}")"
    printf '%s\n' "${line}" | tee -a "${LOG_FILE}"
    SKIP_COUNT=$(( SKIP_COUNT + 1 ))
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Safe JSON payload builder
|
||||
# Usage: _json_payload '<jq-template-with-$vars>' key1=value1 key2=value2 ...
|
||||
# Uses jq --arg to safely encode shell values into JSON, preventing injection
|
||||
# via special characters in variable values (e.g., quotes, backslashes).
|
||||
# ---------------------------------------------------------------------------
|
||||
_json_payload() {
    # Safely build a JSON payload from a jq template plus key=value pairs.
    # Usage: _json_payload '<jq-template-with-$vars>' key1=value1 ...
    # Each value is bound with `jq --arg`, so quotes and backslashes in
    # shell values are JSON-escaped instead of corrupting the payload.
    local template="${1:?template required}"; shift
    local jq_args=() pair
    for pair in "$@"; do
        # Split at the first '=': left side is the jq variable name,
        # everything after it (which may itself contain '=') is the value.
        jq_args+=(--arg "${pair%%=*}" "${pair#*=}")
    done
    jq -n "${jq_args[@]}" "$template"
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ID extractors
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
get_docker_id() {
    # Print the ID of the first Docker container, or nothing at all.
    # Every failure path (call error, bad JSON, empty list) is swallowed so
    # callers can treat an empty result as "skip this test".
    local raw
    raw="$(mcporter_call '{"action":"docker","subaction":"list"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    containers = d.get('containers', [])
    if containers:
        print(containers[0]['id'])
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_network_id() {
    # Print the ID of the first Docker network, or nothing at all.
    # All failures are swallowed — empty output means "skip".
    local raw
    raw="$(mcporter_call '{"action":"docker","subaction":"networks"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    nets = d.get('networks', [])
    if nets:
        print(nets[0]['id'])
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_vm_id() {
    # Print the first VM's id (falling back to its uuid), or nothing.
    # Tolerates both response shapes: top-level 'vms' or 'domains'.
    local raw
    raw="$(mcporter_call '{"action":"vm","subaction":"list"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    vms = d.get('vms', d.get('domains', []))
    if vms:
        print(vms[0].get('id', vms[0].get('uuid', '')))
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_key_id() {
    # Print the first API key's id, or nothing.  Tolerates both response
    # shapes: top-level 'keys' or 'apiKeys'.
    local raw
    raw="$(mcporter_call '{"action":"key","subaction":"list"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    keys = d.get('keys', d.get('apiKeys', []))
    if keys:
        print(keys[0].get('id', ''))
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_disk_id() {
    # Print the first disk's id, or nothing; failures are swallowed so
    # callers can treat "" as "no disks — skip".
    local raw
    raw="$(mcporter_call '{"action":"disk","subaction":"disks"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    disks = d.get('disks', [])
    if disks:
        print(disks[0]['id'])
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_log_path() {
    # Print a representative log-file path: prefer something ending in .log
    # or containing 'syslog'/'messages'; otherwise fall back to the first
    # listed file.  Prints nothing on any failure.
    local raw
    raw="$(mcporter_call '{"action":"disk","subaction":"log_files"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    files = d.get('log_files', [])
    for f in files:
        p = f.get('path', '')
        if p.endswith('.log') or 'syslog' in p or 'messages' in p:
            print(p)
            break
    else:
        # for/else: loop finished without a break — no preferred match,
        # fall back to the first file if any exist.
        if files:
            print(files[0]['path'])
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
get_ups_id() {
    # Print the first UPS device's id (falling back to its name), or nothing.
    # Tolerates both response shapes: 'ups_devices' or 'upsDevices'.
    local raw
    raw="$(mcporter_call '{"action":"system","subaction":"ups_devices"}' 2>/dev/null)" || return 0
    printf '%s' "${raw}" | python3 -c "
import sys, json
try:
    d = json.load(sys.stdin)
    devs = d.get('ups_devices', d.get('upsDevices', []))
    if devs:
        print(devs[0].get('id', devs[0].get('name', '')))
except Exception:
    pass
" 2>/dev/null || true
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Grouped test suites
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
suite_system() {
    # Read-only checks across every `system` subaction, plus per-device UPS
    # lookups when a UPS is discovered (otherwise those two are skipped).
    printf '\n%b== system (info/metrics/UPS) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "system: overview" '{"action":"system","subaction":"overview"}'
    run_test "system: array" '{"action":"system","subaction":"array"}'
    run_test "system: network" '{"action":"system","subaction":"network"}'
    run_test "system: registration" '{"action":"system","subaction":"registration"}'
    run_test "system: variables" '{"action":"system","subaction":"variables"}'
    run_test "system: metrics" '{"action":"system","subaction":"metrics"}'
    run_test "system: services" '{"action":"system","subaction":"services"}'
    run_test "system: display" '{"action":"system","subaction":"display"}'
    run_test "system: config" '{"action":"system","subaction":"config"}'
    run_test "system: online" '{"action":"system","subaction":"online"}'
    run_test "system: owner" '{"action":"system","subaction":"owner"}'
    run_test "system: settings" '{"action":"system","subaction":"settings"}'
    run_test "system: server" '{"action":"system","subaction":"server"}'
    run_test "system: servers" '{"action":"system","subaction":"servers"}'
    run_test "system: flash" '{"action":"system","subaction":"flash"}'
    run_test "system: ups_devices" '{"action":"system","subaction":"ups_devices"}'

    # Device-scoped subactions need a real ID; discover one or skip.
    local ups_id
    ups_id="$(get_ups_id)" || ups_id=''
    if [[ -n "${ups_id}" ]]; then
        run_test "system: ups_device" \
            "$(_json_payload '{"action":"system","subaction":"ups_device","device_id":$v}' v="${ups_id}")"
        run_test "system: ups_config" \
            "$(_json_payload '{"action":"system","subaction":"ups_config","device_id":$v}' v="${ups_id}")"
    else
        skip_test "system: ups_device" "no UPS devices found"
        skip_test "system: ups_config" "no UPS devices found"
    fi
}
|
||||
|
||||
suite_array() {
    # Read-only parity queries; anything that mutates the array is skipped.
    printf '\n%b== array (read-only) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
    run_test "array: parity_status" '{"action":"array","subaction":"parity_status"}'
    run_test "array: parity_history" '{"action":"array","subaction":"parity_history"}'
    # Destructive: parity_start/pause/resume/cancel, start_array, stop_array,
    # add_disk, remove_disk, mount_disk, unmount_disk, clear_disk_stats — skipped
}
|
||||
|
||||
suite_disk() {
    # Read-only storage checks (shares/disks/log files), plus detail and
    # log-tail lookups that depend on discovered IDs (skipped when empty).
    printf '\n%b== disk (storage/shares/logs) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "disk: shares" '{"action":"disk","subaction":"shares"}'
    run_test "disk: disks" '{"action":"disk","subaction":"disks"}'
    run_test "disk: log_files" '{"action":"disk","subaction":"log_files"}'

    local disk_id
    disk_id="$(get_disk_id)" || disk_id=''
    if [[ -n "${disk_id}" ]]; then
        run_test "disk: disk_details" \
            "$(_json_payload '{"action":"disk","subaction":"disk_details","disk_id":$v}' v="${disk_id}")"
    else
        skip_test "disk: disk_details" "no disks found"
    fi

    local log_path
    log_path="$(get_log_path)" || log_path=''
    if [[ -n "${log_path}" ]]; then
        run_test "disk: logs" \
            "$(_json_payload '{"action":"disk","subaction":"logs","log_path":$v,"tail_lines":20}' v="${log_path}")"
    else
        skip_test "disk: logs" "no log files found"
    fi
    # Destructive: flash_backup — skipped
}
|
||||
|
||||
suite_docker() {
    # Read-only docker checks plus detail lookups for one discovered
    # container and network; mutating subactions are not exercised.
    printf '\n%b== docker ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "docker: list" '{"action":"docker","subaction":"list"}'
    run_test "docker: networks" '{"action":"docker","subaction":"networks"}'

    local container_id
    container_id="$(get_docker_id)" || container_id=''
    if [[ -n "${container_id}" ]]; then
        run_test "docker: details" \
            "$(_json_payload '{"action":"docker","subaction":"details","container_id":$v}' v="${container_id}")"
    else
        skip_test "docker: details" "no containers found"
    fi

    local network_id
    network_id="$(get_network_id)" || network_id=''
    if [[ -n "${network_id}" ]]; then
        run_test "docker: network_details" \
            "$(_json_payload '{"action":"docker","subaction":"network_details","network_id":$v}' v="${network_id}")"
    else
        skip_test "docker: network_details" "no networks found"
    fi
    # Destructive/mutating: start/stop/restart — skipped
}
|
||||
|
||||
suite_vm() {
    # VM listing plus a details lookup when at least one VM exists.
    printf '\n%b== vm ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "vm: list" '{"action":"vm","subaction":"list"}'

    local vm_id
    vm_id="$(get_vm_id)" || vm_id=''
    if [[ -n "${vm_id}" ]]; then
        run_test "vm: details" \
            "$(_json_payload '{"action":"vm","subaction":"details","vm_id":$v}' v="${vm_id}")"
    else
        skip_test "vm: details" "no VMs found (or VM service unavailable)"
    fi
    # Destructive: start/stop/pause/resume/force_stop/reboot/reset — skipped
}
|
||||
|
||||
suite_notification() {
    # Notification checks treated as non-destructive here; the mutating
    # subactions listed below are skipped.
    printf '\n%b== notification ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "notification: overview" '{"action":"notification","subaction":"overview"}'
    run_test "notification: list" '{"action":"notification","subaction":"list"}'
    run_test "notification: recalculate" '{"action":"notification","subaction":"recalculate"}'
    # Mutating: create/archive/mark_unread/delete/delete_archived/archive_all/etc. — skipped
}
|
||||
|
||||
suite_rclone() {
    # Read-only rclone checks; config_form only fetches the s3 provider form.
    printf '\n%b== rclone ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "rclone: list_remotes" '{"action":"rclone","subaction":"list_remotes"}'
    run_test "rclone: config_form" '{"action":"rclone","subaction":"config_form","provider_type":"s3"}'
    # Destructive: create_remote/delete_remote — skipped
}
|
||||
|
||||
suite_user() {
    # Single check: identity of the calling API user.
    printf '\n%b== user ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
    run_test "user: me" '{"action":"user","subaction":"me"}'
}
|
||||
|
||||
suite_key() {
    # API-key listing plus a `get` on the first discovered key.
    printf '\n%b== key (API keys) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "key: list" '{"action":"key","subaction":"list"}'

    local key_id
    key_id="$(get_key_id)" || key_id=''
    if [[ -n "${key_id}" ]]; then
        run_test "key: get" \
            "$(_json_payload '{"action":"key","subaction":"get","key_id":$v}' v="${key_id}")"
    else
        skip_test "key: get" "no API keys found"
    fi
    # Destructive: create/update/delete/add_role/remove_role — skipped
}
|
||||
|
||||
suite_health() {
    # Health/connectivity checks; `setup` is skipped because it triggers
    # elicitation (interactive prompting).
    printf '\n%b== health ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "health: check" '{"action":"health","subaction":"check"}'
    run_test "health: test_connection" '{"action":"health","subaction":"test_connection"}'
    run_test "health: diagnose" '{"action":"health","subaction":"diagnose"}'
    # setup triggers elicitation — skipped
}
|
||||
|
||||
suite_customization() {
    # Read-only theme/SSO/setup queries; set_theme (mutating) is skipped.
    printf '\n%b== customization ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "customization: theme" '{"action":"customization","subaction":"theme"}'
    run_test "customization: public_theme" '{"action":"customization","subaction":"public_theme"}'
    run_test "customization: sso_enabled" '{"action":"customization","subaction":"sso_enabled"}'
    run_test "customization: is_initial_setup" '{"action":"customization","subaction":"is_initial_setup"}'
    # Mutating: set_theme — skipped
}
|
||||
|
||||
suite_plugin() {
    # Plugin listing only; add/remove are destructive and skipped.
    printf '\n%b== plugin ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "plugin: list" '{"action":"plugin","subaction":"list"}'
    # Destructive: add/remove — skipped
}
|
||||
|
||||
suite_oidc() {
    # OIDC provider/configuration queries; ID-parameterised subactions are
    # skipped because no provider ID is discovered here.
    printf '\n%b== oidc ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"

    run_test "oidc: providers" '{"action":"oidc","subaction":"providers"}'
    run_test "oidc: public_providers" '{"action":"oidc","subaction":"public_providers"}'
    run_test "oidc: configuration" '{"action":"oidc","subaction":"configuration"}'
    # provider and validate_session require IDs — skipped
}
|
||||
|
||||
suite_live() {
    # Live/subscription snapshot checks.
    printf '\n%b== live (snapshot subscriptions) ==%b\n' "${C_BOLD}" "${C_RESET}" | tee -a "${LOG_FILE}"
    # Note: these subactions open a transient WebSocket and wait for the first event.
    # Event-driven actions (parity_progress, ups_status, notifications_overview,
    # owner, server_status) return status=no_recent_events when no events arrive.
    run_test "live: cpu" '{"action":"live","subaction":"cpu"}'
    run_test "live: memory" '{"action":"live","subaction":"memory"}'
    run_test "live: cpu_telemetry" '{"action":"live","subaction":"cpu_telemetry"}'
    run_test "live: notifications_overview" '{"action":"live","subaction":"notifications_overview"}'
    run_test "live: log_tail" '{"action":"live","subaction":"log_tail"}'
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Print final summary
|
||||
# ---------------------------------------------------------------------------
|
||||
print_summary() {
    # Emit the final PASS/FAIL/SKIP/TOTAL table with elapsed time, then list
    # failed test labels (and the log path) when there were failures.
    # Reads: TS_START, PASS_COUNT, FAIL_COUNT, SKIP_COUNT, FAIL_NAMES,
    # LOG_FILE and the C_* color constants.
    local total_ms="$(( ( $(date +%s%N) - TS_START ) / 1000000 ))"
    local total=$(( PASS_COUNT + FAIL_COUNT + SKIP_COUNT ))

    # "printf '=%.0s' {1..65}" renders a 65-character ruler line.
    printf '\n%b%s%b\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"
    printf '%b%-20s%b %b%d%b\n' "${C_BOLD}" "PASS" "${C_RESET}" "${C_GREEN}" "${PASS_COUNT}" "${C_RESET}"
    printf '%b%-20s%b %b%d%b\n' "${C_BOLD}" "FAIL" "${C_RESET}" "${C_RED}" "${FAIL_COUNT}" "${C_RESET}"
    printf '%b%-20s%b %b%d%b\n' "${C_BOLD}" "SKIP" "${C_RESET}" "${C_YELLOW}" "${SKIP_COUNT}" "${C_RESET}"
    printf '%b%-20s%b %d\n' "${C_BOLD}" "TOTAL" "${C_RESET}" "${total}"
    printf '%b%-20s%b %ds (%dms)\n' "${C_BOLD}" "ELAPSED" "${C_RESET}" \
        "$(( total_ms / 1000 ))" "${total_ms}"
    printf '%b%s%b\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"

    if [[ "${FAIL_COUNT}" -gt 0 ]]; then
        printf '\n%bFailed tests:%b\n' "${C_RED}" "${C_RESET}"
        local name
        for name in "${FAIL_NAMES[@]}"; do
            printf ' • %s\n' "${name}"
        done
        printf '\nFull log: %s\n' "${LOG_FILE}"
    fi
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parallel runner
|
||||
# ---------------------------------------------------------------------------
|
||||
run_parallel() {
    # Run every suite in a background subshell.  Globals mutated inside a
    # subshell do not propagate back, so each subshell writes its counters
    # and failure labels to temp files which are aggregated afterwards.
    log_warn "--parallel mode: per-suite counters aggregated via temp files."

    local tmp_dir
    tmp_dir="$(mktemp -d)"
    # The RETURN trap fires when this function exits, cleaning the temp dir.
    trap 'rm -rf -- "${tmp_dir}"' RETURN

    local suites=(
        suite_system
        suite_array
        suite_disk
        suite_docker
        suite_vm
        suite_notification
        suite_rclone
        suite_user
        suite_key
        suite_health
        suite_customization
        suite_plugin
        suite_oidc
        suite_live
    )

    local pids=()
    local suite
    for suite in "${suites[@]}"; do
        (
            # Fresh per-subshell counters; the suite mutates these copies only.
            PASS_COUNT=0; FAIL_COUNT=0; SKIP_COUNT=0; FAIL_NAMES=()
            "${suite}"
            printf '%d %d %d\n' "${PASS_COUNT}" "${FAIL_COUNT}" "${SKIP_COUNT}" \
                > "${tmp_dir}/${suite}.counts"
            printf '%s\n' "${FAIL_NAMES[@]:-}" > "${tmp_dir}/${suite}.fails"
        ) &
        pids+=($!)
    done

    local pid
    for pid in "${pids[@]}"; do
        # `|| true`: a failing suite must not abort aggregation of the rest.
        wait "${pid}" || true
    done

    # Fold each suite's counts back into the caller's global counters.
    local f
    for f in "${tmp_dir}"/*.counts; do
        [[ -f "${f}" ]] || continue
        local p fl s
        read -r p fl s < "${f}"
        PASS_COUNT=$(( PASS_COUNT + p ))
        FAIL_COUNT=$(( FAIL_COUNT + fl ))
        SKIP_COUNT=$(( SKIP_COUNT + s ))
    done

    # Collect failed-test labels; the -n guard skips the single blank line
    # an empty FAIL_NAMES array leaves behind.
    for f in "${tmp_dir}"/*.fails; do
        [[ -f "${f}" ]] || continue
        while IFS= read -r line; do
            [[ -n "${line}" ]] && FAIL_NAMES+=("${line}")
        done < "${f}"
    done
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sequential runner
|
||||
# ---------------------------------------------------------------------------
|
||||
run_sequential() {
    # Run every suite in order, in the current shell, so the global
    # PASS/FAIL/SKIP counters accumulate directly (no temp files needed).
    local suite
    for suite in \
        suite_system suite_array suite_disk suite_docker suite_vm \
        suite_notification suite_rclone suite_user suite_key suite_health \
        suite_customization suite_plugin suite_oidc suite_live; do
        "${suite}"
    done
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
main() {
    # Orchestrate the full run: banner → prerequisite checks → startup
    # smoke-test → suites (parallel or sequential) → summary → exit code.
    # Exit codes: 0 all pass, 1 test failures, 2 environment/startup problem.
    parse_args "$@"

    printf '%b%s%b\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"
    printf '%b unraid-mcp integration smoke-test (single unraid tool)%b\n' "${C_BOLD}" "${C_RESET}"
    printf '%b Project: %s%b\n' "${C_BOLD}" "${PROJECT_DIR}" "${C_RESET}"
    printf '%b Timeout: %dms/call | Parallel: %s%b\n' \
        "${C_BOLD}" "${CALL_TIMEOUT_MS}" "${USE_PARALLEL}" "${C_RESET}"
    printf '%b Log: %s%b\n' "${C_BOLD}" "${LOG_FILE}" "${C_RESET}"
    printf '%b%s%b\n\n' "${C_BOLD}" "$(printf '=%.0s' {1..65})" "${C_RESET}"

    check_prerequisites || exit 2

    # A server that cannot even answer a health check makes every test
    # meaningless — abort before running any suite.
    smoke_test_server || {
        log_error ""
        log_error "Server startup failed. Aborting — no tests will run."
        log_error ""
        log_error "To diagnose, run:"
        log_error " cd ${PROJECT_DIR} && uv run unraid-mcp-server"
        exit 2
    }

    if [[ "${USE_PARALLEL}" == true ]]; then
        run_parallel
    else
        run_sequential
    fi

    print_summary

    if [[ "${FAIL_COUNT}" -gt 0 ]]; then
        exit 1
    fi
    exit 0
}
|
||||
|
||||
# Script entry point: forward all CLI arguments to main().
main "$@"
|
||||
0
tests/property/__init__.py
Normal file
0
tests/property/__init__.py
Normal file
755
tests/property/test_input_validation.py
Normal file
755
tests/property/test_input_validation.py
Normal file
@@ -0,0 +1,755 @@
|
||||
"""Property-based tests for tool input validation.
|
||||
|
||||
Uses Hypothesis to fuzz tool inputs and verify the core invariant:
|
||||
Tools MUST only raise ToolError (or return normally).
|
||||
Any KeyError, AttributeError, TypeError, ValueError, IndexError, or
|
||||
other unhandled exception from arbitrary inputs is a bug.
|
||||
|
||||
Each test class targets a distinct tool domain and strategy profile:
|
||||
- Docker: arbitrary container IDs, subaction names, numeric params
|
||||
- Notifications: importance strings, list_type strings, field lengths
|
||||
- Keys: arbitrary key IDs, role lists, name strings
|
||||
- VM: arbitrary VM IDs, subaction names
|
||||
- Info: invalid subaction names (cross-tool invariant for the subaction guard)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp.exceptions import ToolError
|
||||
from hypothesis import HealthCheck, given, settings
|
||||
from hypothesis import strategies as st
|
||||
|
||||
|
||||
# Ensure tests/ is on sys.path so "from conftest import make_tool_fn" resolves
# the same way that top-level test files do.
_TESTS_DIR = str(Path(__file__).parent.parent)
if _TESTS_DIR not in sys.path:
    sys.path.insert(0, _TESTS_DIR)

# Imported after the sys.path tweak on purpose: conftest lives in tests/,
# which is only importable once the insert above has run (hence noqa: E402).
from conftest import make_tool_fn  # noqa: E402
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Shared helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Kept as a tuple so it can be used directly in `except`/isinstance clauses.
# NOTE(review): not referenced anywhere in this module's visible code —
# confirm it is still needed before relying on it.
_ALLOWED_EXCEPTION_TYPES = (ToolError,)
"""Only ToolError (or a clean return) is acceptable from any tool call.

Any other exception is a bug — it means the tool let an internal error
surface to the caller instead of wrapping it in a user-friendly ToolError.
"""
|
||||
|
||||
|
||||
def _run(coro) -> Any:
    """Run a coroutine synchronously so Hypothesis @given works with async tools.

    Uses :func:`asyncio.run`, which creates (and always closes) a fresh event
    loop per call.  The previous ``asyncio.get_event_loop().run_until_complete``
    form is deprecated when no loop is running (Python 3.10+) and also shared
    one loop across all Hypothesis examples, risking state leakage.
    """
    return asyncio.run(coro)
|
||||
|
||||
|
||||
def _assert_only_tool_error(exc: BaseException) -> None:
    """Fail the test unless *exc* is a ToolError (i.e. not an internal crash)."""
    message = (
        f"Tool raised {type(exc).__name__} instead of ToolError: {exc!r}\n"
        "This is a bug — all error paths must produce ToolError."
    )
    assert isinstance(exc, ToolError), message
|
||||
|
||||
|
||||
def _make_tool() -> Any:
    """Build the single `unraid` tool function via the shared conftest factory."""
    return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Docker: arbitrary container IDs
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDockerContainerIdFuzzing:
    """Fuzz the container_id parameter for Docker actions.

    Invariant: whatever string arrives as container_id, the tool either
    returns normally or raises ToolError — never an internal exception
    such as KeyError or AttributeError.
    """

    def _fuzz(self, subaction: str, container_id: str) -> None:
        """Drive one docker *subaction* with a fuzzed container_id.

        The GraphQL layer is mocked to return an empty container list, so
        every fuzzed ID is unknown; ToolError is the only acceptable failure
        and is suppressed, while anything else propagates and fails the test.
        """

        async def _scenario() -> None:
            tool_fn = _make_tool()
            with patch(
                "unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
            ) as mock:
                mock.return_value = {"docker": {"containers": []}}
                with contextlib.suppress(ToolError):
                    await tool_fn(
                        action="docker", subaction=subaction, container_id=container_id
                    )

        _run(_scenario())

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_details_arbitrary_container_id(self, container_id: str) -> None:
        """Arbitrary container IDs for 'details' must not crash the tool."""
        self._fuzz("details", container_id)

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_start_arbitrary_container_id(self, container_id: str) -> None:
        """Arbitrary container IDs for 'start' must not crash the tool."""
        self._fuzz("start", container_id)

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_stop_arbitrary_container_id(self, container_id: str) -> None:
        """Arbitrary container IDs for 'stop' must not crash the tool."""
        self._fuzz("stop", container_id)

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_restart_arbitrary_container_id(self, container_id: str) -> None:
        """Arbitrary container IDs for 'restart' must not crash the tool."""
        # restart = stop then start; both paths read the same mocked list.
        self._fuzz("restart", container_id)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Docker: invalid subaction names
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDockerInvalidActions:
    """Fuzz the subaction parameter with arbitrary strings for the docker domain.

    Invariant: invalid subaction names raise ToolError, never KeyError or crash.
    This validates the subaction guard that sits inside every domain handler.
    """

    # Subactions the docker domain implements; fuzzed values that land on one
    # of these are skipped because they carry real (different) semantics.
    _VALID_SUBACTIONS = frozenset(
        {"list", "details", "start", "stop", "restart", "networks", "network_details"}
    )

    @given(st.text())
    @settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
        """Any non-valid subaction string for docker must raise ToolError, not crash."""
        if subaction in self._VALID_SUBACTIONS:
            return  # Skip valid subactions — they have different semantics

        async def _scenario() -> None:
            tool_fn = _make_tool()
            with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock):
                try:
                    await tool_fn(action="docker", subaction=subaction)
                except ToolError:
                    pass  # Correct: invalid subaction raises ToolError
                except Exception as exc:
                    # Any other exception escaping the handler is a bug.
                    pytest.fail(
                        f"subaction={subaction!r} raised {type(exc).__name__} "
                        f"instead of ToolError: {exc!r}"
                    )

        _run(_scenario())
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Notifications: importance and list_type enum fuzzing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNotificationsEnumFuzzing:
    """Fuzz notification enum parameters.

    Invariant: invalid enum values must produce ToolError with a helpful message,
    never crash with an AttributeError or unhandled exception.
    """

    @staticmethod
    def _assert_only_tool_error(label: str, mock_return: object, /, **tool_kwargs) -> None:
        """Invoke the consolidated tool with a mocked GraphQL layer.

        The call must either return cleanly or raise ToolError; any other
        exception type fails the test, with *label* included in the diagnostic.
        When ``mock_return`` is not None it is installed as the mocked
        make_graphql_request return value so valid inputs can complete.
        """

        async def _run_test() -> None:
            tool_fn = _make_tool()
            with patch(
                "unraid_mcp.tools.unraid.make_graphql_request",
                new_callable=AsyncMock,
            ) as mock:
                if mock_return is not None:
                    mock.return_value = mock_return
                try:
                    await tool_fn(**tool_kwargs)
                except ToolError:
                    pass  # Expected rejection path for invalid input
                except Exception as exc:
                    pytest.fail(f"{label} raised {type(exc).__name__}: {exc!r}")

        _run(_run_test())

    @given(st.text())
    @settings(max_examples=150, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_importance_raises_tool_error(self, importance: str) -> None:
        """Arbitrary importance strings must raise ToolError or be accepted if valid."""
        if importance.upper() in {"INFO", "WARNING", "ALERT"}:
            return  # Skip valid values — they have success semantics
        self._assert_only_tool_error(
            f"importance={importance!r}",
            {"createNotification": {"id": "1", "title": "t", "importance": "INFO"}},
            action="notification",
            subaction="create",
            title="Test",
            subject="Sub",
            description="Desc",
            importance=importance,
        )

    @given(st.text())
    @settings(max_examples=150, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_list_type_raises_tool_error(self, list_type: str) -> None:
        """Arbitrary list_type strings must raise ToolError or proceed if valid."""
        if list_type.upper() in {"UNREAD", "ARCHIVE"}:
            return  # Skip valid values
        self._assert_only_tool_error(
            f"list_type={list_type!r}",
            {"notifications": {"list": []}},
            action="notification",
            subaction="list",
            list_type=list_type,
        )

    @given(
        st.text(max_size=300),  # title: limit is 200
        st.text(max_size=600),  # subject: limit is 500
        st.text(max_size=2500),  # description: limit is 2000
    )
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_create_notification_field_lengths(
        self, title: str, subject: str, description: str
    ) -> None:
        """Oversized title/subject/description must raise ToolError, not crash.

        This tests the length-guard invariant: tools that have max-length checks
        must raise ToolError for oversized values, never truncate silently or crash.
        """
        self._assert_only_tool_error(
            "create with oversized fields",
            {"createNotification": {"id": "1", "title": "t", "importance": "INFO"}},
            action="notification",
            subaction="create",
            title=title,
            subject=subject,
            description=description,
            importance="INFO",
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_notification_type_raises_tool_error(self, notif_type: str) -> None:
        """Arbitrary notification_type strings must raise ToolError or proceed if valid."""
        if notif_type.upper() in {"UNREAD", "ARCHIVE"}:
            return
        self._assert_only_tool_error(
            f"notification_type={notif_type!r}",
            {"deleteNotification": {}},
            action="notification",
            subaction="delete",
            notification_id="some-id",
            notification_type=notif_type,
            confirm=True,
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
        """Invalid subaction names for notifications domain raise ToolError."""
        valid_subactions = {
            "overview",
            "list",
            "create",
            "archive",
            "mark_unread",
            "delete",
            "delete_archived",
            "archive_all",
            "archive_many",
            "unarchive_many",
            "unarchive_all",
            "recalculate",
        }
        if subaction in valid_subactions:
            return
        self._assert_only_tool_error(
            f"subaction={subaction!r}",
            None,  # No GraphQL response needed — invalid subactions must fail early
            action="notification",
            subaction=subaction,
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Keys: arbitrary key IDs and role lists
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestKeysInputFuzzing:
    """Fuzz API key management parameters.

    Invariant: arbitrary key_id strings, names, and role lists never crash
    the keys domain — only ToolError or clean return values are acceptable.
    """

    @staticmethod
    def _assert_only_tool_error(label: str, mock_return: object, /, **tool_kwargs) -> None:
        """Call the 'key' domain with mocked GraphQL; only ToolError may escape.

        Any exception other than ToolError fails the test, with *label*
        included in the diagnostic. ``mock_return`` (when not None) becomes
        the mocked make_graphql_request return value.
        """

        async def _run_test() -> None:
            tool_fn = _make_tool()
            with patch(
                "unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
            ) as mock:
                if mock_return is not None:
                    mock.return_value = mock_return
                try:
                    await tool_fn(**tool_kwargs)
                except ToolError:
                    pass  # Clean rejection is acceptable
                except Exception as exc:
                    pytest.fail(f"{label} raised {type(exc).__name__}: {exc!r}")

        _run(_run_test())

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_get_arbitrary_key_id(self, key_id: str) -> None:
        """Arbitrary key_id for 'get' must not crash the tool."""
        self._assert_only_tool_error(
            f"key_id={key_id!r}",
            {"apiKey": None},  # Simulate "no such key" from the server
            action="key",
            subaction="get",
            key_id=key_id,
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_create_arbitrary_key_name(self, name: str) -> None:
        """Arbitrary name strings for 'create' must not crash the tool."""
        self._assert_only_tool_error(
            f"name={name!r}",
            {"apiKey": {"create": {"id": "1", "name": name, "key": "k", "roles": []}}},
            action="key",
            subaction="create",
            name=name,
        )

    @given(st.lists(st.text(), min_size=1, max_size=10))
    @settings(max_examples=80, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_add_role_arbitrary_roles(self, roles: list[str]) -> None:
        """Arbitrary role lists for 'add_role' must not crash the tool."""
        self._assert_only_tool_error(
            f"roles={roles!r}",
            {"apiKey": {"addRole": True}},
            action="key",
            subaction="add_role",
            key_id="some-key-id",
            roles=roles,
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
        """Invalid subaction names for keys domain raise ToolError."""
        valid_subactions = {"list", "get", "create", "update", "delete", "add_role", "remove_role"}
        if subaction in valid_subactions:
            return
        self._assert_only_tool_error(
            f"subaction={subaction!r}",
            None,  # No GraphQL response needed — invalid subactions must fail early
            action="key",
            subaction=subaction,
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# VM: arbitrary VM IDs and subaction names
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestVMInputFuzzing:
    """Fuzz VM management parameters.

    Invariant: arbitrary vm_id strings and subaction names must never crash
    the VM domain — only ToolError or clean return values are acceptable.
    """

    @staticmethod
    def _assert_only_tool_error(label: str, mock_return: object, /, **tool_kwargs) -> None:
        """Call the 'vm' domain with mocked GraphQL; only ToolError may escape.

        Any exception other than ToolError fails the test, with *label*
        included in the diagnostic. ``mock_return`` (when not None) becomes
        the mocked make_graphql_request return value.
        """

        async def _run_test() -> None:
            tool_fn = _make_tool()
            with patch(
                "unraid_mcp.tools.unraid.make_graphql_request",
                new_callable=AsyncMock,
            ) as mock:
                if mock_return is not None:
                    mock.return_value = mock_return
                try:
                    await tool_fn(**tool_kwargs)
                except ToolError:
                    pass  # Clean rejection is acceptable
                except Exception as exc:
                    pytest.fail(f"{label} raised {type(exc).__name__}: {exc!r}")

        _run(_run_test())

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_start_arbitrary_vm_id(self, vm_id: str) -> None:
        """Arbitrary vm_id for 'start' must not crash the tool."""
        self._assert_only_tool_error(
            f"vm_id={vm_id!r}",
            {"vm": {"start": True}},
            action="vm",
            subaction="start",
            vm_id=vm_id,
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_stop_arbitrary_vm_id(self, vm_id: str) -> None:
        """Arbitrary vm_id for 'stop' must not crash the tool."""
        self._assert_only_tool_error(
            f"vm_id={vm_id!r}",
            {"vm": {"stop": True}},
            action="vm",
            subaction="stop",
            vm_id=vm_id,
        )

    @given(st.text())
    @settings(max_examples=100, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_details_arbitrary_vm_id(self, vm_id: str) -> None:
        """Arbitrary vm_id for 'details' must not crash the tool."""
        self._assert_only_tool_error(
            f"vm_id={vm_id!r}",
            # Return an empty VM list so the lookup gracefully fails
            {"vms": {"domains": []}},
            action="vm",
            subaction="details",
            vm_id=vm_id,
        )

    @given(st.text())
    @settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_action_raises_tool_error(self, subaction: str) -> None:
        """Invalid subaction names for VM domain raise ToolError."""
        valid_subactions = {
            "list",
            "details",
            "start",
            "stop",
            "pause",
            "resume",
            "force_stop",
            "reboot",
            "reset",
        }
        if subaction in valid_subactions:
            return
        self._assert_only_tool_error(
            f"subaction={subaction!r}",
            None,  # No GraphQL response needed — invalid subactions must fail early
            action="vm",
            subaction=subaction,
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cross-tool: boundary-value and unicode stress tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBoundaryValues:
    """Boundary-value and adversarial string tests across multiple tools.

    These tests probe specific edge cases that have historically caused bugs
    in similar systems: null bytes, very long strings, unicode surrogates,
    and empty strings.
    """

    @staticmethod
    def _assert_only_tool_error(label: str, mock_return: object, /, **tool_kwargs) -> None:
        """Call the tool with mocked GraphQL; only ToolError may escape.

        Any exception other than ToolError fails the test, with *label*
        included in the diagnostic. ``mock_return`` becomes the mocked
        make_graphql_request return value.
        """

        async def _run_test() -> None:
            tool_fn = _make_tool()
            with patch(
                "unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock
            ) as mock:
                if mock_return is not None:
                    mock.return_value = mock_return
                try:
                    await tool_fn(**tool_kwargs)
                except ToolError:
                    pass  # Clean rejection is acceptable
                except Exception as exc:
                    pytest.fail(f"{label} raised {type(exc).__name__}: {exc!r}")

        _run(_run_test())

    @given(
        st.one_of(
            st.just(""),
            st.just("\x00"),
            st.just("\xff\xfe"),
            st.just("a" * 10_001),
            st.just("/" * 500),
            st.just("'; DROP TABLE containers; --"),
            st.just("${7*7}"),
            st.just("\u0000\uffff"),
            st.just("\n\r\t"),
            st.binary().map(lambda b: b.decode("latin-1")),
        )
    )
    @settings(max_examples=50, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_docker_details_adversarial_inputs(self, container_id: str) -> None:
        """Adversarial container_id values must not crash the Docker domain."""
        self._assert_only_tool_error(
            f"Adversarial input {container_id!r}",
            {"docker": {"containers": []}},
            action="docker",
            subaction="details",
            container_id=container_id,
        )

    @given(
        st.one_of(
            st.just(""),
            st.just("\x00"),
            st.just("a" * 100_000),
            st.just("ALERT\x00"),
            st.just("info"),  # wrong case
            st.just("Info"),  # mixed case
            st.just("UNKNOWN"),
            st.just(" INFO "),  # padded
        )
    )
    @settings(max_examples=30, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_notifications_importance_adversarial(self, importance: str) -> None:
        """Adversarial importance values must raise ToolError, not crash."""
        self._assert_only_tool_error(
            f"importance={importance!r}",
            {"createNotification": {"id": "1", "title": "t", "importance": "INFO"}},
            action="notification",
            subaction="create",
            title="t",
            subject="s",
            description="d",
            importance=importance,
        )

    @given(
        st.one_of(
            st.just(""),
            st.just("\x00"),
            st.just("a" * 1_000_000),  # extreme length
            st.just("key with spaces"),
            st.just("key\nnewline"),
        )
    )
    @settings(max_examples=20, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_keys_get_adversarial_key_ids(self, key_id: str) -> None:
        """Adversarial key_id values must not crash the keys get action."""
        self._assert_only_tool_error(
            f"key_id={key_id!r}",
            {"apiKey": None},
            action="key",
            subaction="get",
            key_id=key_id,
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Top-level action guard (invalid domain names)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestInfoActionGuard:
    """Fuzz the top-level action parameter (domain selector).

    Invariant: the consolidated unraid tool must reject any invalid domain
    with a ToolError rather than a KeyError crash.
    """

    # Domains the consolidated tool recognizes; anything else must be rejected.
    _VALID_ACTIONS = frozenset(
        {
            "array",
            "customization",
            "disk",
            "docker",
            "health",
            "key",
            "live",
            "notification",
            "oidc",
            "plugin",
            "rclone",
            "setting",
            "system",
            "user",
            "vm",
        }
    )

    @given(st.text())
    @settings(max_examples=200, suppress_health_check=[HealthCheck.function_scoped_fixture])
    def test_invalid_action_raises_tool_error(self, action: str) -> None:
        """Invalid domain names raise ToolError."""
        if action in self._VALID_ACTIONS:
            # Valid domains have their own success-path tests elsewhere.
            return

        async def _exercise() -> None:
            tool_fn = _make_tool()
            with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock):
                try:
                    await tool_fn(action=action, subaction="list")
                except ToolError:
                    pass  # Correct: the domain guard rejected the input
                except Exception as exc:
                    pytest.fail(
                        f"Action {action!r} raised {type(exc).__name__} "
                        f"instead of ToolError: {exc!r}"
                    )

        _run(_exercise())
|
||||
@@ -1,6 +1,6 @@
|
||||
"""Safety audit tests for destructive action confirmation guards.
|
||||
|
||||
Verifies that all destructive operations across every tool require
|
||||
Verifies that all destructive operations across every domain require
|
||||
explicit `confirm=True` before execution, and that the DESTRUCTIVE_ACTIONS
|
||||
registries are complete and consistent.
|
||||
"""
|
||||
@@ -9,66 +9,75 @@ from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
# Import DESTRUCTIVE_ACTIONS sets from every tool module that defines one
|
||||
from unraid_mcp.tools.docker import DESTRUCTIVE_ACTIONS as DOCKER_DESTRUCTIVE
|
||||
from unraid_mcp.tools.docker import MUTATIONS as DOCKER_MUTATIONS
|
||||
from unraid_mcp.tools.keys import DESTRUCTIVE_ACTIONS as KEYS_DESTRUCTIVE
|
||||
from unraid_mcp.tools.keys import MUTATIONS as KEYS_MUTATIONS
|
||||
from unraid_mcp.tools.notifications import DESTRUCTIVE_ACTIONS as NOTIF_DESTRUCTIVE
|
||||
from unraid_mcp.tools.notifications import MUTATIONS as NOTIF_MUTATIONS
|
||||
from unraid_mcp.tools.rclone import DESTRUCTIVE_ACTIONS as RCLONE_DESTRUCTIVE
|
||||
from unraid_mcp.tools.rclone import MUTATIONS as RCLONE_MUTATIONS
|
||||
from unraid_mcp.tools.virtualization import DESTRUCTIVE_ACTIONS as VM_DESTRUCTIVE
|
||||
from unraid_mcp.tools.virtualization import MUTATIONS as VM_MUTATIONS
|
||||
|
||||
# Centralized import for make_tool_fn helper
|
||||
# conftest.py sits in tests/ and is importable without __init__.py
|
||||
from conftest import make_tool_fn
|
||||
# Import DESTRUCTIVE_ACTIONS and MUTATIONS sets from the consolidated unraid module
|
||||
from unraid_mcp.tools.unraid import (
|
||||
_ARRAY_DESTRUCTIVE,
|
||||
_ARRAY_MUTATIONS,
|
||||
_DISK_DESTRUCTIVE,
|
||||
_DISK_MUTATIONS,
|
||||
_KEY_DESTRUCTIVE,
|
||||
_KEY_MUTATIONS,
|
||||
_NOTIFICATION_DESTRUCTIVE,
|
||||
_NOTIFICATION_MUTATIONS,
|
||||
_PLUGIN_DESTRUCTIVE,
|
||||
_PLUGIN_MUTATIONS,
|
||||
_RCLONE_DESTRUCTIVE,
|
||||
_RCLONE_MUTATIONS,
|
||||
_SETTING_DESTRUCTIVE,
|
||||
_SETTING_MUTATIONS,
|
||||
_VM_DESTRUCTIVE,
|
||||
_VM_MUTATIONS,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Known destructive actions registry (ground truth for this audit)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Every destructive action in the codebase, keyed by (tool_module, tool_name)
|
||||
KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str]]] = {
|
||||
"docker": {
|
||||
"module": "unraid_mcp.tools.docker",
|
||||
"register_fn": "register_docker_tool",
|
||||
"tool_name": "unraid_docker",
|
||||
"actions": {"remove"},
|
||||
"runtime_set": DOCKER_DESTRUCTIVE,
|
||||
KNOWN_DESTRUCTIVE: dict[str, dict] = {
|
||||
"array": {
|
||||
"actions": {"remove_disk", "clear_disk_stats", "stop_array"},
|
||||
"runtime_set": _ARRAY_DESTRUCTIVE,
|
||||
"mutations": _ARRAY_MUTATIONS,
|
||||
},
|
||||
"vm": {
|
||||
"module": "unraid_mcp.tools.virtualization",
|
||||
"register_fn": "register_vm_tool",
|
||||
"tool_name": "unraid_vm",
|
||||
"actions": {"force_stop", "reset"},
|
||||
"runtime_set": VM_DESTRUCTIVE,
|
||||
"runtime_set": _VM_DESTRUCTIVE,
|
||||
"mutations": _VM_MUTATIONS,
|
||||
},
|
||||
"notifications": {
|
||||
"module": "unraid_mcp.tools.notifications",
|
||||
"register_fn": "register_notifications_tool",
|
||||
"tool_name": "unraid_notifications",
|
||||
"notification": {
|
||||
"actions": {"delete", "delete_archived"},
|
||||
"runtime_set": NOTIF_DESTRUCTIVE,
|
||||
"runtime_set": _NOTIFICATION_DESTRUCTIVE,
|
||||
"mutations": _NOTIFICATION_MUTATIONS,
|
||||
},
|
||||
"rclone": {
|
||||
"module": "unraid_mcp.tools.rclone",
|
||||
"register_fn": "register_rclone_tool",
|
||||
"tool_name": "unraid_rclone",
|
||||
"actions": {"delete_remote"},
|
||||
"runtime_set": RCLONE_DESTRUCTIVE,
|
||||
"runtime_set": _RCLONE_DESTRUCTIVE,
|
||||
"mutations": _RCLONE_MUTATIONS,
|
||||
},
|
||||
"keys": {
|
||||
"module": "unraid_mcp.tools.keys",
|
||||
"register_fn": "register_keys_tool",
|
||||
"tool_name": "unraid_keys",
|
||||
"key": {
|
||||
"actions": {"delete"},
|
||||
"runtime_set": KEYS_DESTRUCTIVE,
|
||||
"runtime_set": _KEY_DESTRUCTIVE,
|
||||
"mutations": _KEY_MUTATIONS,
|
||||
},
|
||||
"disk": {
|
||||
"actions": {"flash_backup"},
|
||||
"runtime_set": _DISK_DESTRUCTIVE,
|
||||
"mutations": _DISK_MUTATIONS,
|
||||
},
|
||||
"setting": {
|
||||
"actions": {"configure_ups"},
|
||||
"runtime_set": _SETTING_DESTRUCTIVE,
|
||||
"mutations": _SETTING_MUTATIONS,
|
||||
},
|
||||
"plugin": {
|
||||
"actions": {"remove"},
|
||||
"runtime_set": _PLUGIN_DESTRUCTIVE,
|
||||
"mutations": _PLUGIN_MUTATIONS,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -81,183 +90,168 @@ KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str]]] = {
|
||||
class TestDestructiveActionRegistries:
|
||||
"""Verify that DESTRUCTIVE_ACTIONS sets in source code match the audit."""
|
||||
|
||||
@pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_set_matches_audit(self, tool_key: str) -> None:
|
||||
"""Each tool's DESTRUCTIVE_ACTIONS must exactly match the audited set."""
|
||||
info = KNOWN_DESTRUCTIVE[tool_key]
|
||||
@pytest.mark.parametrize("domain", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_set_matches_audit(self, domain: str) -> None:
|
||||
info = KNOWN_DESTRUCTIVE[domain]
|
||||
assert info["runtime_set"] == info["actions"], (
|
||||
f"{tool_key}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, "
|
||||
f"expected {info['actions']}"
|
||||
f"{domain}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, expected {info['actions']}"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_actions_are_valid_mutations(self, tool_key: str) -> None:
|
||||
"""Every destructive action must correspond to an actual mutation."""
|
||||
info = KNOWN_DESTRUCTIVE[tool_key]
|
||||
mutations_map = {
|
||||
"docker": DOCKER_MUTATIONS,
|
||||
"vm": VM_MUTATIONS,
|
||||
"notifications": NOTIF_MUTATIONS,
|
||||
"rclone": RCLONE_MUTATIONS,
|
||||
"keys": KEYS_MUTATIONS,
|
||||
}
|
||||
mutations = mutations_map[tool_key]
|
||||
@pytest.mark.parametrize("domain", list(KNOWN_DESTRUCTIVE.keys()))
|
||||
def test_destructive_actions_are_valid_mutations(self, domain: str) -> None:
|
||||
info = KNOWN_DESTRUCTIVE[domain]
|
||||
for action in info["actions"]:
|
||||
assert action in mutations, (
|
||||
f"{tool_key}: destructive action '{action}' is not in MUTATIONS"
|
||||
assert action in info["mutations"], (
|
||||
f"{domain}: destructive action '{action}' is not in MUTATIONS"
|
||||
)
|
||||
|
||||
def test_no_delete_or_remove_mutations_missing_from_destructive(self) -> None:
|
||||
"""Any mutation with 'delete' or 'remove' in its name should be destructive."""
|
||||
all_mutations = {
|
||||
"docker": DOCKER_MUTATIONS,
|
||||
"vm": VM_MUTATIONS,
|
||||
"notifications": NOTIF_MUTATIONS,
|
||||
"rclone": RCLONE_MUTATIONS,
|
||||
"keys": KEYS_MUTATIONS,
|
||||
}
|
||||
all_destructive = {
|
||||
"docker": DOCKER_DESTRUCTIVE,
|
||||
"vm": VM_DESTRUCTIVE,
|
||||
"notifications": NOTIF_DESTRUCTIVE,
|
||||
"rclone": RCLONE_DESTRUCTIVE,
|
||||
"keys": KEYS_DESTRUCTIVE,
|
||||
}
|
||||
"""Any mutation with 'delete' or 'remove' in its name should be destructive.
|
||||
|
||||
Exceptions (documented, intentional):
|
||||
key/remove_role — fully reversible; the role can always be re-added via add_role.
|
||||
"""
|
||||
_HEURISTIC_EXCEPTIONS: frozenset[str] = frozenset(
|
||||
{
|
||||
"key/remove_role", # reversible — role can be re-added via add_role
|
||||
}
|
||||
)
|
||||
|
||||
missing: list[str] = []
|
||||
for tool_key, mutations in all_mutations.items():
|
||||
destructive = all_destructive[tool_key]
|
||||
for action_name in mutations:
|
||||
if ("delete" in action_name or "remove" in action_name) and action_name not in destructive:
|
||||
missing.append(f"{tool_key}/{action_name}")
|
||||
for domain, info in KNOWN_DESTRUCTIVE.items():
|
||||
destructive = info["runtime_set"]
|
||||
for action_name in info["mutations"]:
|
||||
if (
|
||||
("delete" in action_name or "remove" in action_name)
|
||||
and action_name not in destructive
|
||||
and f"{domain}/{action_name}" not in _HEURISTIC_EXCEPTIONS
|
||||
):
|
||||
missing.append(f"{domain}/{action_name}")
|
||||
assert not missing, (
|
||||
f"Mutations with 'delete'/'remove' not in DESTRUCTIVE_ACTIONS: {missing}"
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Confirmation guard tests: calling without confirm=True raises ToolError
|
||||
# Confirmation guard tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Build parametrized test cases: (tool_key, action, kwargs_without_confirm)
|
||||
# Each destructive action needs the minimum required params (minus confirm)
|
||||
# (action, subaction, extra_kwargs)
|
||||
_DESTRUCTIVE_TEST_CASES: list[tuple[str, str, dict]] = [
|
||||
# Docker
|
||||
("docker", "remove", {"container_id": "abc123"}),
|
||||
# Array
|
||||
("array", "remove_disk", {"disk_id": "abc123:local"}),
|
||||
("array", "clear_disk_stats", {"disk_id": "abc123:local"}),
|
||||
("array", "stop_array", {}),
|
||||
# VM
|
||||
("vm", "force_stop", {"vm_id": "test-vm-uuid"}),
|
||||
("vm", "reset", {"vm_id": "test-vm-uuid"}),
|
||||
# Notifications
|
||||
("notifications", "delete", {"notification_id": "notif-1", "notification_type": "UNREAD"}),
|
||||
("notifications", "delete_archived", {}),
|
||||
("notification", "delete", {"notification_id": "notif-1", "notification_type": "UNREAD"}),
|
||||
("notification", "delete_archived", {}),
|
||||
# RClone
|
||||
("rclone", "delete_remote", {"name": "my-remote"}),
|
||||
# Keys
|
||||
("keys", "delete", {"key_id": "key-123"}),
|
||||
("key", "delete", {"key_id": "key-123"}),
|
||||
# Disk (flash_backup)
|
||||
(
|
||||
"disk",
|
||||
"flash_backup",
|
||||
{"remote_name": "r", "source_path": "/boot", "destination_path": "r:b"},
|
||||
),
|
||||
# Settings
|
||||
("setting", "configure_ups", {"ups_config": {"mode": "slave"}}),
|
||||
# Plugins
|
||||
("plugin", "remove", {"names": ["my-plugin"]}),
|
||||
]
|
||||
|
||||
|
||||
_CASE_IDS = [f"{c[0]}/{c[1]}" for c in _DESTRUCTIVE_TEST_CASES]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_docker_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
_MODULE = "unraid_mcp.tools.unraid"
|
||||
_REGISTER_FN = "register_unraid_tool"
|
||||
_TOOL_NAME = "unraid"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_vm_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(f"{_MODULE}.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_notif_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_rclone_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.rclone.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_keys_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
# Map tool_key -> (fixture name, module path, register fn, tool name)
|
||||
_TOOL_REGISTRY = {
|
||||
"docker": ("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"),
|
||||
"vm": ("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"),
|
||||
"notifications": ("unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"),
|
||||
"rclone": ("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone"),
|
||||
"keys": ("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys"),
|
||||
}
|
||||
|
||||
|
||||
class TestConfirmationGuards:
|
||||
"""Every destructive action must reject calls without confirm=True."""
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_rejects_without_confirm(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_docker_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""Calling a destructive action without confirm=True must raise ToolError."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action=action, **kwargs)
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_rejects_with_confirm_false(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_docker_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""Explicitly passing confirm=False must still raise ToolError."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action=action, subaction=subaction, confirm=False, **kwargs)
|
||||
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action=action, confirm=False, **kwargs)
|
||||
|
||||
@pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_error_message_includes_action_name(
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_error_message_includes_subaction_name(
|
||||
self,
|
||||
tool_key: str,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_docker_graphql: AsyncMock,
|
||||
_mock_vm_graphql: AsyncMock,
|
||||
_mock_notif_graphql: AsyncMock,
|
||||
_mock_rclone_graphql: AsyncMock,
|
||||
_mock_keys_graphql: AsyncMock,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
"""The error message should include the action name for clarity."""
|
||||
module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key]
|
||||
tool_fn = make_tool_fn(module_path, register_fn, tool_name)
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError, match=subaction):
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
|
||||
with pytest.raises(ToolError, match=action):
|
||||
await tool_fn(action=action, **kwargs)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Strict guard tests: no network calls escape when unconfirmed
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNoGraphQLCallsWhenUnconfirmed:
|
||||
"""The most critical safety property: when confirm is missing/False,
|
||||
NO GraphQL request must ever reach the network layer.
|
||||
"""
|
||||
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_without_confirm(
|
||||
self,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
@pytest.mark.parametrize("action,subaction,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS)
|
||||
async def test_no_graphql_call_with_confirm_false(
|
||||
self,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
with pytest.raises(ToolError):
|
||||
await tool_fn(action=action, subaction=subaction, confirm=False, **kwargs)
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -268,57 +262,152 @@ class TestConfirmationGuards:
|
||||
class TestConfirmAllowsExecution:
|
||||
"""Destructive actions with confirm=True should reach the GraphQL layer."""
|
||||
|
||||
async def test_docker_remove_with_confirm(self, _mock_docker_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_docker_graphql.side_effect = [
|
||||
{"docker": {"containers": [{"id": cid, "names": ["old-app"]}]}},
|
||||
{"docker": {"removeContainer": True}},
|
||||
]
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker")
|
||||
result = await tool_fn(action="remove", container_id="old-app", confirm=True)
|
||||
async def test_vm_force_stop_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="vm", subaction="force_stop", vm_id="test-uuid", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_vm_force_stop_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None:
|
||||
_mock_vm_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
result = await tool_fn(action="force_stop", vm_id="test-uuid", confirm=True)
|
||||
async def test_vm_reset_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"reset": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="vm", subaction="reset", vm_id="test-uuid", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_vm_reset_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None:
|
||||
_mock_vm_graphql.return_value = {"vm": {"reset": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
result = await tool_fn(action="reset", vm_id="test-uuid", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_notifications_delete_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None:
|
||||
_mock_notif_graphql.return_value = {"notifications": {"deleteNotification": True}}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
)
|
||||
async def test_notifications_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"deleteNotification": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="notif-1",
|
||||
notification_type="UNREAD",
|
||||
confirm=True,
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_notifications_delete_archived_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None:
|
||||
_mock_notif_graphql.return_value = {"notifications": {"deleteArchivedNotifications": True}}
|
||||
tool_fn = make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
async def test_notifications_delete_archived_with_confirm(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"deleteArchivedNotifications": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="notification", subaction="delete_archived", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_rclone_delete_remote_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="rclone", subaction="delete_remote", name="my-remote", confirm=True
|
||||
)
|
||||
result = await tool_fn(action="delete_archived", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_rclone_delete_remote_with_confirm(self, _mock_rclone_graphql: AsyncMock) -> None:
|
||||
_mock_rclone_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")
|
||||
result = await tool_fn(action="delete_remote", name="my-remote", confirm=True)
|
||||
async def test_keys_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"delete": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="key", subaction="delete", key_id="key-123", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_keys_delete_with_confirm(self, _mock_keys_graphql: AsyncMock) -> None:
|
||||
_mock_keys_graphql.return_value = {"deleteApiKeys": True}
|
||||
tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
result = await tool_fn(action="delete", key_id="key-123", confirm=True)
|
||||
async def test_disk_flash_backup_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:1"}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="disk",
|
||||
subaction="flash_backup",
|
||||
confirm=True,
|
||||
remote_name="r",
|
||||
source_path="/boot",
|
||||
destination_path="r:b",
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_settings_configure_ups_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"configureUps": True}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="setting",
|
||||
subaction="configure_ups",
|
||||
confirm=True,
|
||||
ups_config={"mode": "master", "cable": "usb"},
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_remove_disk_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"removeDiskFromArray": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="array", subaction="remove_disk", disk_id="abc:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_clear_disk_stats_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"clearArrayDiskStatistics": True}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_array_stop_array_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STOPPED"}}}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action="array", subaction="stop_array", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_plugins_remove_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"removePlugin": True}
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(
|
||||
action="plugin", subaction="remove", names=["my-plugin"], confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Non-destructive actions must NOT require confirm
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNonDestructiveActionsNeverRequireConfirm:
|
||||
"""Guard regression: non-destructive ops must work without confirm."""
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"action,subaction,kwargs,mock_return",
|
||||
[
|
||||
("array", "parity_cancel", {}, {"parityCheck": {"cancel": True}}),
|
||||
("vm", "start", {"vm_id": "test-uuid"}, {"vm": {"start": True}}),
|
||||
("notification", "archive_all", {}, {"archiveAll": {"info": 0, "total": 0}}),
|
||||
("rclone", "list_remotes", {}, {"rclone": {"remotes": []}}),
|
||||
("key", "list", {}, {"apiKeys": []}),
|
||||
],
|
||||
ids=[
|
||||
"array/parity_cancel",
|
||||
"vm/start",
|
||||
"notification/archive_all",
|
||||
"rclone/list_remotes",
|
||||
"key/list",
|
||||
],
|
||||
)
|
||||
async def test_non_destructive_action_works_without_confirm(
|
||||
self,
|
||||
action: str,
|
||||
subaction: str,
|
||||
kwargs: dict,
|
||||
mock_return: dict,
|
||||
_mock_graphql: AsyncMock,
|
||||
) -> None:
|
||||
_mock_graphql.return_value = mock_return
|
||||
tool_fn = make_tool_fn(_MODULE, _REGISTER_FN, _TOOL_NAME)
|
||||
result = await tool_fn(action=action, subaction=subaction, **kwargs)
|
||||
assert result is not None
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_array tool."""
|
||||
"""Tests for array subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,48 +11,54 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.array.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestArrayValidation:
|
||||
async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_invalid_subaction_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="start")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="array", subaction="start")
|
||||
|
||||
async def test_removed_actions_are_invalid(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in (
|
||||
for subaction in (
|
||||
"start",
|
||||
"stop",
|
||||
"shutdown",
|
||||
"reboot",
|
||||
"mount_disk",
|
||||
"unmount_disk",
|
||||
"clear_stats",
|
||||
):
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action=action)
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="array", subaction=subaction)
|
||||
|
||||
async def test_parity_start_requires_correct(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="correct is required"):
|
||||
await tool_fn(action="array", subaction="parity_start")
|
||||
_mock_graphql.assert_not_called()
|
||||
|
||||
|
||||
class TestArrayActions:
|
||||
async def test_parity_start(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start")
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "parity_start"
|
||||
assert result["subaction"] == "parity_start"
|
||||
_mock_graphql.assert_called_once()
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"correct": False}
|
||||
|
||||
async def test_parity_start_with_correct(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start", correct=True)
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=True)
|
||||
assert result["success"] is True
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"correct": True}
|
||||
@@ -60,32 +66,32 @@ class TestArrayActions:
|
||||
async def test_parity_status(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"array": {"parityCheckStatus": {"progress": 50}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_status")
|
||||
result = await tool_fn(action="array", subaction="parity_status")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_pause(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"pause": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_pause")
|
||||
result = await tool_fn(action="array", subaction="parity_pause")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_resume(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"resume": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_resume")
|
||||
result = await tool_fn(action="array", subaction="parity_resume")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_parity_cancel(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"cancel": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_cancel")
|
||||
result = await tool_fn(action="array", subaction="parity_cancel")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("disk error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disk error"):
|
||||
await tool_fn(action="parity_status")
|
||||
with pytest.raises(ToolError, match="Failed to execute array/parity_status"):
|
||||
await tool_fn(action="array", subaction="parity_status")
|
||||
|
||||
|
||||
class TestArrayMutationFailures:
|
||||
@@ -94,14 +100,14 @@ class TestArrayMutationFailures:
|
||||
async def test_parity_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": False}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start")
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": False}}
|
||||
|
||||
async def test_parity_start_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": None}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start")
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": None}}
|
||||
|
||||
@@ -110,7 +116,7 @@ class TestArrayMutationFailures:
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"parityCheck": {"start": {}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="parity_start")
|
||||
result = await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
assert result["success"] is True
|
||||
assert result["data"] == {"parityCheck": {"start": {}}}
|
||||
|
||||
@@ -118,7 +124,7 @@ class TestArrayMutationFailures:
|
||||
_mock_graphql.side_effect = TimeoutError("operation timed out")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="parity_cancel")
|
||||
await tool_fn(action="array", subaction="parity_cancel")
|
||||
|
||||
|
||||
class TestArrayNetworkErrors:
|
||||
@@ -128,10 +134,123 @@ class TestArrayNetworkErrors:
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="HTTP error 500"):
|
||||
await tool_fn(action="parity_start")
|
||||
await tool_fn(action="array", subaction="parity_start", correct=False)
|
||||
|
||||
async def test_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = ToolError("Network connection error: Connection refused")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Network connection error"):
|
||||
await tool_fn(action="parity_status")
|
||||
await tool_fn(action="array", subaction="parity_status")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# New actions: parity_history, start/stop array, disk operations
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
# parity_history
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_parity_history_returns_history(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"parityHistory": [{"date": "2026-03-01T00:00:00Z", "status": "COMPLETED", "errors": 0}]
|
||||
}
|
||||
result = await _make_tool()(action="array", subaction="parity_history")
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]["parityHistory"]) == 1
|
||||
|
||||
|
||||
# Array state mutations
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_start_array(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STARTED"}}}
|
||||
result = await _make_tool()(action="array", subaction="start_array")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_array_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(action="array", subaction="stop_array", confirm=False)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_array_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"setState": {"state": "STOPPED"}}}
|
||||
result = await _make_tool()(action="array", subaction="stop_array", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# add_disk
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_disk_requires_disk_id(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await _make_tool()(action="array", subaction="add_disk")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_disk_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"addDiskToArray": {"state": "STARTED"}}}
|
||||
result = await _make_tool()(action="array", subaction="add_disk", disk_id="abc123:local")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# remove_disk — destructive
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_disk_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(
|
||||
action="array", subaction="remove_disk", disk_id="abc123:local", confirm=False
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_disk_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"removeDiskFromArray": {"state": "STOPPED"}}}
|
||||
result = await _make_tool()(
|
||||
action="array", subaction="remove_disk", disk_id="abc123:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# mount_disk / unmount_disk
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_mount_disk_requires_disk_id(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await _make_tool()(action="array", subaction="mount_disk")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_unmount_disk_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"unmountArrayDisk": {"id": "abc123:local"}}}
|
||||
result = await _make_tool()(action="array", subaction="unmount_disk", disk_id="abc123:local")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
# clear_disk_stats — destructive
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_disk_stats_requires_confirm(_mock_graphql):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc123:local", confirm=False
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_clear_disk_stats_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"array": {"clearArrayDiskStatistics": True}}
|
||||
result = await _make_tool()(
|
||||
action="array", subaction="clear_disk_stats", disk_id="abc123:local", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""Tests for unraid_mcp.core.client — GraphQL client infrastructure."""
|
||||
|
||||
import json
|
||||
import time
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import httpx
|
||||
@@ -9,11 +10,13 @@ import pytest
|
||||
from unraid_mcp.core.client import (
|
||||
DEFAULT_TIMEOUT,
|
||||
DISK_TIMEOUT,
|
||||
_redact_sensitive,
|
||||
_QueryCache,
|
||||
_RateLimiter,
|
||||
is_idempotent_error,
|
||||
make_graphql_request,
|
||||
redact_sensitive,
|
||||
)
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.core.exceptions import CredentialsNotConfiguredError, ToolError
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -57,7 +60,7 @@ class TestIsIdempotentError:
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _redact_sensitive
|
||||
# redact_sensitive
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@@ -66,36 +69,36 @@ class TestRedactSensitive:
|
||||
|
||||
def test_flat_dict(self) -> None:
|
||||
data = {"username": "admin", "password": "hunter2", "host": "10.0.0.1"}
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result["username"] == "admin"
|
||||
assert result["password"] == "***"
|
||||
assert result["host"] == "10.0.0.1"
|
||||
|
||||
def test_nested_dict(self) -> None:
|
||||
data = {"config": {"apiKey": "abc123", "url": "http://host"}}
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result["config"]["apiKey"] == "***"
|
||||
assert result["config"]["url"] == "http://host"
|
||||
|
||||
def test_list_of_dicts(self) -> None:
|
||||
data = [{"token": "t1"}, {"name": "safe"}]
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result[0]["token"] == "***"
|
||||
assert result[1]["name"] == "safe"
|
||||
|
||||
def test_deeply_nested(self) -> None:
|
||||
data = {"a": {"b": {"c": {"secret": "deep"}}}}
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result["a"]["b"]["c"]["secret"] == "***"
|
||||
|
||||
def test_non_dict_passthrough(self) -> None:
|
||||
assert _redact_sensitive("plain_string") == "plain_string"
|
||||
assert _redact_sensitive(42) == 42
|
||||
assert _redact_sensitive(None) is None
|
||||
assert redact_sensitive("plain_string") == "plain_string"
|
||||
assert redact_sensitive(42) == 42
|
||||
assert redact_sensitive(None) is None
|
||||
|
||||
def test_case_insensitive_keys(self) -> None:
|
||||
data = {"Password": "p1", "TOKEN": "t1", "ApiKey": "k1", "Secret": "s1", "Key": "x1"}
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
for v in result.values():
|
||||
assert v == "***"
|
||||
|
||||
@@ -109,7 +112,7 @@ class TestRedactSensitive:
|
||||
"username": "safe",
|
||||
"host": "safe",
|
||||
}
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result["user_password"] == "***"
|
||||
assert result["api_key_value"] == "***"
|
||||
assert result["auth_token_expiry"] == "***"
|
||||
@@ -119,12 +122,26 @@ class TestRedactSensitive:
|
||||
|
||||
def test_mixed_list_content(self) -> None:
|
||||
data = [{"key": "val"}, "string", 123, [{"token": "inner"}]]
|
||||
result = _redact_sensitive(data)
|
||||
result = redact_sensitive(data)
|
||||
assert result[0]["key"] == "***"
|
||||
assert result[1] == "string"
|
||||
assert result[2] == 123
|
||||
assert result[3][0]["token"] == "***"
|
||||
|
||||
def test_new_sensitive_keys_are_redacted(self) -> None:
|
||||
"""PR-added keys: authorization, cookie, session, credential, passphrase, jwt."""
|
||||
data = {
|
||||
"authorization": "Bearer token123",
|
||||
"cookie": "session=abc",
|
||||
"jwt": "eyJ...",
|
||||
"credential": "secret_cred",
|
||||
"passphrase": "hunter2",
|
||||
"session": "sess_id",
|
||||
}
|
||||
result = redact_sensitive(data)
|
||||
for key, val in result.items():
|
||||
assert val == "***", f"Key '{key}' was not redacted"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Timeout constants
|
||||
@@ -156,8 +173,8 @@ class TestMakeGraphQLRequestSuccess:
|
||||
@pytest.fixture(autouse=True)
|
||||
def _patch_config(self):
|
||||
with (
|
||||
patch("unraid_mcp.core.client.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.core.client.UNRAID_API_KEY", "test-key"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_KEY", "test-key"),
|
||||
):
|
||||
yield
|
||||
|
||||
@@ -241,22 +258,22 @@ class TestMakeGraphQLRequestErrors:
|
||||
@pytest.fixture(autouse=True)
|
||||
def _patch_config(self):
|
||||
with (
|
||||
patch("unraid_mcp.core.client.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.core.client.UNRAID_API_KEY", "test-key"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_KEY", "test-key"),
|
||||
):
|
||||
yield
|
||||
|
||||
async def test_missing_api_url(self) -> None:
|
||||
with (
|
||||
patch("unraid_mcp.core.client.UNRAID_API_URL", ""),
|
||||
pytest.raises(ToolError, match="UNRAID_API_URL not configured"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_URL", ""),
|
||||
pytest.raises(CredentialsNotConfiguredError),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
async def test_missing_api_key(self) -> None:
|
||||
with (
|
||||
patch("unraid_mcp.core.client.UNRAID_API_KEY", ""),
|
||||
pytest.raises(ToolError, match="UNRAID_API_KEY not configured"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_KEY", ""),
|
||||
pytest.raises(CredentialsNotConfiguredError),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -274,7 +291,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="HTTP error 401"),
|
||||
pytest.raises(ToolError, match="Unraid API returned HTTP 401"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -292,7 +309,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="HTTP error 500"),
|
||||
pytest.raises(ToolError, match="Unraid API returned HTTP 500"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -310,7 +327,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="HTTP error 503"),
|
||||
pytest.raises(ToolError, match="Unraid API returned HTTP 503"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -320,7 +337,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="Network connection error"),
|
||||
pytest.raises(ToolError, match="Network error connecting to Unraid API"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -330,7 +347,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="Network connection error"),
|
||||
pytest.raises(ToolError, match="Network error connecting to Unraid API"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -344,7 +361,7 @@ class TestMakeGraphQLRequestErrors:
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="Invalid JSON response"),
|
||||
pytest.raises(ToolError, match=r"invalid response.*not valid JSON"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
@@ -360,8 +377,8 @@ class TestGraphQLErrorHandling:
|
||||
@pytest.fixture(autouse=True)
|
||||
def _patch_config(self):
|
||||
with (
|
||||
patch("unraid_mcp.core.client.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.core.client.UNRAID_API_KEY", "test-key"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_KEY", "test-key"),
|
||||
):
|
||||
yield
|
||||
|
||||
@@ -464,3 +481,240 @@ class TestGraphQLErrorHandling:
|
||||
pytest.raises(ToolError, match="GraphQL API error"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _RateLimiter
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRateLimiter:
|
||||
"""Unit tests for the token bucket rate limiter."""
|
||||
|
||||
async def test_acquire_consumes_one_token(self) -> None:
|
||||
limiter = _RateLimiter(max_tokens=10, refill_rate=1.0)
|
||||
initial = limiter.tokens
|
||||
await limiter.acquire()
|
||||
assert limiter.tokens == pytest.approx(initial - 1, abs=1e-3)
|
||||
|
||||
async def test_acquire_succeeds_when_tokens_available(self) -> None:
|
||||
limiter = _RateLimiter(max_tokens=5, refill_rate=1.0)
|
||||
# Should complete without sleeping
|
||||
for _ in range(5):
|
||||
await limiter.acquire()
|
||||
# _refill() runs during each acquire() call and adds a tiny time-based
|
||||
# amount; check < 1.0 (not enough for another immediate request) rather
|
||||
# than == 0.0 to avoid flakiness from timing.
|
||||
assert limiter.tokens < 1.0
|
||||
|
||||
async def test_tokens_do_not_exceed_max(self) -> None:
|
||||
limiter = _RateLimiter(max_tokens=10, refill_rate=1.0)
|
||||
# Force refill with large elapsed time
|
||||
limiter.last_refill = time.monotonic() - 100.0 # 100 seconds ago
|
||||
limiter._refill()
|
||||
assert limiter.tokens == 10.0 # Capped at max_tokens
|
||||
|
||||
async def test_refill_adds_tokens_based_on_elapsed(self) -> None:
|
||||
limiter = _RateLimiter(max_tokens=100, refill_rate=10.0)
|
||||
limiter.tokens = 0.0
|
||||
limiter.last_refill = time.monotonic() - 1.0 # 1 second ago
|
||||
limiter._refill()
|
||||
# Should have refilled ~10 tokens (10.0 rate * 1.0 sec)
|
||||
assert 9.5 < limiter.tokens < 10.5
|
||||
|
||||
async def test_acquire_sleeps_when_no_tokens(self) -> None:
|
||||
"""When tokens are exhausted, acquire should sleep before consuming."""
|
||||
limiter = _RateLimiter(max_tokens=1, refill_rate=1.0)
|
||||
limiter.tokens = 0.0
|
||||
|
||||
sleep_calls = []
|
||||
|
||||
async def fake_sleep(duration: float) -> None:
|
||||
sleep_calls.append(duration)
|
||||
# Simulate refill by advancing last_refill so tokens replenish
|
||||
limiter.tokens = 1.0
|
||||
limiter.last_refill = time.monotonic()
|
||||
|
||||
with patch("unraid_mcp.core.client.asyncio.sleep", side_effect=fake_sleep):
|
||||
await limiter.acquire()
|
||||
|
||||
assert len(sleep_calls) == 1
|
||||
assert sleep_calls[0] > 0
|
||||
|
||||
async def test_default_params_match_api_limits(self) -> None:
|
||||
"""Default rate limiter must use 90 tokens at 9.0/sec (10% headroom from 100/10s)."""
|
||||
limiter = _RateLimiter()
|
||||
assert limiter.max_tokens == 90
|
||||
assert limiter.refill_rate == 9.0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _QueryCache
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestQueryCache:
|
||||
"""Unit tests for the TTL query cache."""
|
||||
|
||||
async def test_miss_on_empty_cache(self) -> None:
|
||||
cache = _QueryCache()
|
||||
assert await cache.get("{ info }", None) is None
|
||||
|
||||
async def test_put_and_get_hit(self) -> None:
|
||||
cache = _QueryCache()
|
||||
data = {"result": "ok"}
|
||||
await cache.put("GetNetworkConfig { }", None, data)
|
||||
result = await cache.get("GetNetworkConfig { }", None)
|
||||
assert result == data
|
||||
|
||||
async def test_expired_entry_returns_none(self) -> None:
|
||||
cache = _QueryCache()
|
||||
data = {"result": "ok"}
|
||||
await cache.put("GetNetworkConfig { }", None, data)
|
||||
# Manually expire the entry
|
||||
key = cache._cache_key("GetNetworkConfig { }", None)
|
||||
cache._store[key] = (time.monotonic() - 1.0, data) # expired 1 sec ago
|
||||
assert await cache.get("GetNetworkConfig { }", None) is None
|
||||
|
||||
async def test_invalidate_all_clears_store(self) -> None:
|
||||
cache = _QueryCache()
|
||||
await cache.put("GetNetworkConfig { }", None, {"x": 1})
|
||||
await cache.put("GetOwner { }", None, {"y": 2})
|
||||
assert len(cache._store) == 2
|
||||
await cache.invalidate_all()
|
||||
assert len(cache._store) == 0
|
||||
|
||||
async def test_variables_affect_cache_key(self) -> None:
|
||||
"""Different variables produce different cache keys."""
|
||||
cache = _QueryCache()
|
||||
q = "GetNetworkConfig($id: ID!) { network(id: $id) { name } }"
|
||||
await cache.put(q, {"id": "1"}, {"name": "eth0"})
|
||||
await cache.put(q, {"id": "2"}, {"name": "eth1"})
|
||||
assert await cache.get(q, {"id": "1"}) == {"name": "eth0"}
|
||||
assert await cache.get(q, {"id": "2"}) == {"name": "eth1"}
|
||||
|
||||
def test_is_cacheable_returns_true_for_known_prefixes(self) -> None:
|
||||
assert _QueryCache.is_cacheable("GetNetworkConfig { ... }") is True
|
||||
assert _QueryCache.is_cacheable("GetRegistrationInfo { ... }") is True
|
||||
assert _QueryCache.is_cacheable("GetOwner { ... }") is True
|
||||
assert _QueryCache.is_cacheable("GetFlash { ... }") is True
|
||||
|
||||
def test_is_cacheable_returns_false_for_mutations(self) -> None:
|
||||
assert _QueryCache.is_cacheable('mutation { docker { start(id: "x") } }') is False
|
||||
|
||||
def test_is_cacheable_returns_false_for_unlisted_queries(self) -> None:
|
||||
assert _QueryCache.is_cacheable("{ docker { containers { id } } }") is False
|
||||
assert _QueryCache.is_cacheable("{ info { os } }") is False
|
||||
|
||||
def test_is_cacheable_mutation_check_is_prefix(self) -> None:
|
||||
"""Queries that start with 'mutation' after whitespace are not cacheable."""
|
||||
assert _QueryCache.is_cacheable(" mutation { ... }") is False
|
||||
|
||||
def test_is_cacheable_with_explicit_query_keyword(self) -> None:
|
||||
"""Operation names after explicit 'query' keyword must be recognized."""
|
||||
assert _QueryCache.is_cacheable("query GetNetworkConfig { network { name } }") is True
|
||||
assert _QueryCache.is_cacheable("query GetOwner { owner { name } }") is True
|
||||
|
||||
def test_is_cacheable_anonymous_query_returns_false(self) -> None:
|
||||
"""Anonymous 'query { ... }' has no operation name — must not be cached."""
|
||||
assert _QueryCache.is_cacheable("query { network { name } }") is False
|
||||
|
||||
async def test_expired_entry_removed_from_store(self) -> None:
|
||||
"""Accessing an expired entry should remove it from the internal store."""
|
||||
cache = _QueryCache()
|
||||
await cache.put("GetOwner { }", None, {"owner": "root"})
|
||||
key = cache._cache_key("GetOwner { }", None)
|
||||
cache._store[key] = (time.monotonic() - 1.0, {"owner": "root"})
|
||||
assert key in cache._store
|
||||
await cache.get("GetOwner { }", None) # triggers deletion
|
||||
assert key not in cache._store
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# make_graphql_request — 429 retry behavior
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRateLimitRetry:
|
||||
"""Tests for the 429 retry loop in make_graphql_request."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _patch_config(self):
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_URL", "https://unraid.local/graphql"),
|
||||
patch("unraid_mcp.config.settings.UNRAID_API_KEY", "test-key"),
|
||||
patch("unraid_mcp.core.client.asyncio.sleep", new_callable=AsyncMock),
|
||||
):
|
||||
yield
|
||||
|
||||
def _make_429_response(self) -> MagicMock:
|
||||
resp = MagicMock()
|
||||
resp.status_code = 429
|
||||
resp.raise_for_status = MagicMock()
|
||||
return resp
|
||||
|
||||
def _make_ok_response(self, data: dict) -> MagicMock:
|
||||
resp = MagicMock()
|
||||
resp.status_code = 200
|
||||
resp.raise_for_status = MagicMock()
|
||||
resp.json.return_value = {"data": data}
|
||||
return resp
|
||||
|
||||
async def test_single_429_then_success_retries(self) -> None:
|
||||
"""One 429 followed by a success should return the data."""
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.side_effect = [
|
||||
self._make_429_response(),
|
||||
self._make_ok_response({"info": {"os": "Unraid"}}),
|
||||
]
|
||||
|
||||
with patch("unraid_mcp.core.client.get_http_client", return_value=mock_client):
|
||||
result = await make_graphql_request("{ info { os } }")
|
||||
|
||||
assert result == {"info": {"os": "Unraid"}}
|
||||
assert mock_client.post.call_count == 2
|
||||
|
||||
async def test_two_429s_then_success(self) -> None:
|
||||
"""Two 429s followed by success returns data after 2 retries."""
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.side_effect = [
|
||||
self._make_429_response(),
|
||||
self._make_429_response(),
|
||||
self._make_ok_response({"x": 1}),
|
||||
]
|
||||
|
||||
with patch("unraid_mcp.core.client.get_http_client", return_value=mock_client):
|
||||
result = await make_graphql_request("{ x }")
|
||||
|
||||
assert result == {"x": 1}
|
||||
assert mock_client.post.call_count == 3
|
||||
|
||||
async def test_three_429s_raises_tool_error(self) -> None:
|
||||
"""Three consecutive 429s (all retries exhausted) raises ToolError."""
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.side_effect = [
|
||||
self._make_429_response(),
|
||||
self._make_429_response(),
|
||||
self._make_429_response(),
|
||||
]
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="rate limiting"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
async def test_rate_limit_error_message_advises_wait(self) -> None:
|
||||
"""The ToolError message should tell the user to wait ~10 seconds."""
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.side_effect = [
|
||||
self._make_429_response(),
|
||||
self._make_429_response(),
|
||||
self._make_429_response(),
|
||||
]
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
|
||||
pytest.raises(ToolError, match="10 seconds"),
|
||||
):
|
||||
await make_graphql_request("{ info }")
|
||||
|
||||
59
tests/test_customization.py
Normal file
59
tests/test_customization.py
Normal file
@@ -0,0 +1,59 @@
|
||||
# tests/test_customization.py
|
||||
"""Tests for customization subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool() -> Any:
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_theme_returns_customization(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"customization": {"theme": {"name": "azure"}, "partnerInfo": None, "activationCode": None}
|
||||
}
|
||||
result = await _make_tool()(action="customization", subaction="theme")
|
||||
assert result["customization"]["theme"]["name"] == "azure"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_public_theme(_mock_graphql):
|
||||
_mock_graphql.return_value = {"publicTheme": {"name": "black"}}
|
||||
result = await _make_tool()(action="customization", subaction="public_theme")
|
||||
assert result["publicTheme"]["name"] == "black"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_is_initial_setup(_mock_graphql):
|
||||
_mock_graphql.return_value = {"isInitialSetup": False}
|
||||
result = await _make_tool()(action="customization", subaction="is_initial_setup")
|
||||
assert result["isInitialSetup"] is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_set_theme_requires_theme(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="theme_name"):
|
||||
await _make_tool()(action="customization", subaction="set_theme")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_set_theme_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"customization": {"setTheme": {"name": "azure", "showBannerImage": True}}
|
||||
}
|
||||
result = await _make_tool()(action="customization", subaction="set_theme", theme_name="azure")
|
||||
assert result["success"] is True
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_docker tool."""
|
||||
"""Tests for docker subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -7,47 +7,6 @@ import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.docker import find_container_by_identifier, get_available_container_names
|
||||
|
||||
|
||||
# --- Unit tests for helpers ---
|
||||
|
||||
|
||||
class TestFindContainerByIdentifier:
|
||||
def test_by_exact_id(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("abc123", containers) == containers[0]
|
||||
|
||||
def test_by_exact_name(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("plex", containers) == containers[0]
|
||||
|
||||
def test_fuzzy_match(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex-media-server"]}]
|
||||
result = find_container_by_identifier("plex", containers)
|
||||
assert result == containers[0]
|
||||
|
||||
def test_not_found(self) -> None:
|
||||
containers = [{"id": "abc123", "names": ["plex"]}]
|
||||
assert find_container_by_identifier("sonarr", containers) is None
|
||||
|
||||
def test_empty_list(self) -> None:
|
||||
assert find_container_by_identifier("plex", []) is None
|
||||
|
||||
|
||||
class TestGetAvailableContainerNames:
|
||||
def test_extracts_names(self) -> None:
|
||||
containers = [
|
||||
{"names": ["plex"]},
|
||||
{"names": ["sonarr", "sonarr-v3"]},
|
||||
]
|
||||
names = get_available_container_names(containers)
|
||||
assert "plex" in names
|
||||
assert "sonarr" in names
|
||||
assert "sonarr-v3" in names
|
||||
|
||||
def test_empty(self) -> None:
|
||||
assert get_available_container_names([]) == []
|
||||
|
||||
|
||||
# --- Integration tests ---
|
||||
@@ -55,30 +14,35 @@ class TestGetAvailableContainerNames:
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestDockerValidation:
|
||||
async def test_remove_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="remove", container_id="abc123")
|
||||
|
||||
@pytest.mark.parametrize("action", ["start", "stop", "details", "logs", "pause", "unpause"])
|
||||
async def test_container_actions_require_id(self, _mock_graphql: AsyncMock, action: str) -> None:
|
||||
@pytest.mark.parametrize("subaction", ["start", "stop", "details"])
|
||||
async def test_container_actions_require_id(
|
||||
self, _mock_graphql: AsyncMock, subaction: str
|
||||
) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="container_id"):
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="docker", subaction=subaction)
|
||||
|
||||
async def test_network_details_requires_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="network_id"):
|
||||
await tool_fn(action="network_details")
|
||||
await tool_fn(action="docker", subaction="network_details")
|
||||
|
||||
async def test_non_logs_action_ignores_tail_lines_validation(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"docker": {"containers": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="docker", subaction="list")
|
||||
assert result["containers"] == []
|
||||
|
||||
|
||||
class TestDockerActions:
|
||||
@@ -87,20 +51,14 @@ class TestDockerActions:
|
||||
"docker": {"containers": [{"id": "c1", "names": ["plex"], "state": "running"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="docker", subaction="list")
|
||||
assert len(result["containers"]) == 1
|
||||
|
||||
async def test_start_container(self, _mock_graphql: AsyncMock) -> None:
|
||||
# First call resolves ID, second performs start
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_graphql.side_effect = [
|
||||
{
|
||||
"docker": {
|
||||
"containers": [
|
||||
{"id": cid, "names": ["plex"]}
|
||||
]
|
||||
}
|
||||
},
|
||||
{"docker": {"containers": [{"id": cid, "names": ["plex"]}]}},
|
||||
{
|
||||
"docker": {
|
||||
"start": {
|
||||
@@ -111,31 +69,15 @@ class TestDockerActions:
|
||||
},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_networks(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"dockerNetworks": [{"id": "net:1", "name": "bridge"}]}
|
||||
_mock_graphql.return_value = {"docker": {"networks": [{"id": "net:1", "name": "bridge"}]}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="networks")
|
||||
result = await tool_fn(action="docker", subaction="networks")
|
||||
assert len(result["networks"]) == 1
|
||||
|
||||
async def test_port_conflicts(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"docker": {"portConflicts": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="port_conflicts")
|
||||
assert result["port_conflicts"] == []
|
||||
|
||||
async def test_check_updates(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"docker": {
|
||||
"containerUpdateStatuses": [{"id": "c1", "name": "plex", "updateAvailable": True}]
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check_updates")
|
||||
assert len(result["update_statuses"]) == 1
|
||||
|
||||
async def test_idempotent_start(self, _mock_graphql: AsyncMock) -> None:
|
||||
# Resolve + idempotent success
|
||||
_mock_graphql.side_effect = [
|
||||
@@ -143,7 +85,7 @@ class TestDockerActions:
|
||||
{"idempotent_success": True, "docker": {}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["idempotent"] is True
|
||||
|
||||
async def test_restart(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -154,9 +96,9 @@ class TestDockerActions:
|
||||
{"docker": {"start": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="restart", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="restart", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "restart"
|
||||
assert result["subaction"] == "restart"
|
||||
|
||||
async def test_restart_idempotent_stop(self, _mock_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
@@ -166,29 +108,10 @@ class TestDockerActions:
|
||||
{"docker": {"start": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="restart", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="restart", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert "note" in result
|
||||
|
||||
async def test_update_all(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"docker": {"updateAllContainers": [{"id": "c1", "state": "running"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="update_all")
|
||||
assert result["success"] is True
|
||||
assert len(result["containers"]) == 1
|
||||
|
||||
async def test_remove_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_graphql.side_effect = [
|
||||
{"docker": {"containers": [{"id": cid, "names": ["old-app"]}]}},
|
||||
{"docker": {"removeContainer": True}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="remove", container_id="old-app", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_details_found(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"docker": {
|
||||
@@ -198,51 +121,38 @@ class TestDockerActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="details", container_id="plex")
|
||||
assert result["names"] == ["plex"]
|
||||
|
||||
async def test_logs(self, _mock_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_graphql.side_effect = [
|
||||
{"docker": {"containers": [{"id": cid, "names": ["plex"]}]}},
|
||||
{"docker": {"logs": "2026-02-08 log line here"}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", container_id="plex")
|
||||
assert "log line" in result["logs"]
|
||||
|
||||
async def test_pause_container(self, _mock_graphql: AsyncMock) -> None:
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_graphql.side_effect = [
|
||||
{"docker": {"containers": [{"id": cid, "names": ["plex"]}]}},
|
||||
{"docker": {"pause": {"id": cid, "state": "paused"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="pause", container_id="plex")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_generic_exception_wraps_in_tool_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("unexpected failure")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="unexpected failure"):
|
||||
await tool_fn(action="list")
|
||||
with pytest.raises(ToolError, match="Failed to execute docker/list"):
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_short_id_prefix_ambiguous_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"docker": {
|
||||
"containers": [
|
||||
{
|
||||
"id": "abcdef1234560000000000000000000000000000000000000000000000000000:local",
|
||||
"names": ["plex"],
|
||||
},
|
||||
{
|
||||
"id": "abcdef1234561111111111111111111111111111111111111111111111111111:local",
|
||||
"names": ["sonarr"],
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="ambiguous"):
|
||||
await tool_fn(action="docker", subaction="details", container_id="abcdef123456")
|
||||
|
||||
|
||||
class TestDockerMutationFailures:
|
||||
"""Tests for mutation responses that indicate failure or unexpected shapes."""
|
||||
|
||||
async def test_remove_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""removeContainer returning null instead of True."""
|
||||
cid = "a" * 64 + ":local"
|
||||
_mock_graphql.side_effect = [
|
||||
{"docker": {"containers": [{"id": cid, "names": ["old-app"]}]}},
|
||||
{"docker": {"removeContainer": None}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="remove", container_id="old-app", confirm=True)
|
||||
assert result["success"] is True
|
||||
assert result["container"] is None
|
||||
|
||||
async def test_start_mutation_empty_docker_response(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""docker field returning empty object (missing the action sub-field)."""
|
||||
cid = "a" * 64 + ":local"
|
||||
@@ -251,7 +161,7 @@ class TestDockerMutationFailures:
|
||||
{"docker": {}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["container"] is None
|
||||
|
||||
@@ -263,18 +173,10 @@ class TestDockerMutationFailures:
|
||||
{"docker": {"stop": {"id": cid, "state": "running"}}},
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="stop", container_id="plex")
|
||||
result = await tool_fn(action="docker", subaction="stop", container_id="plex")
|
||||
assert result["success"] is True
|
||||
assert result["container"]["state"] == "running"
|
||||
|
||||
async def test_update_all_returns_empty_list(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""update_all with no containers to update."""
|
||||
_mock_graphql.return_value = {"docker": {"updateAllContainers": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="update_all")
|
||||
assert result["success"] is True
|
||||
assert result["containers"] == []
|
||||
|
||||
async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Mid-operation timeout during a docker mutation."""
|
||||
|
||||
@@ -285,7 +187,7 @@ class TestDockerMutationFailures:
|
||||
]
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="start", container_id="plex")
|
||||
await tool_fn(action="docker", subaction="start", container_id="plex")
|
||||
|
||||
|
||||
class TestDockerNetworkErrors:
|
||||
@@ -298,14 +200,14 @@ class TestDockerNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_list_http_401_unauthorized(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""HTTP 401 should propagate as ToolError."""
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 401: Unauthorized")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="401"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
async def test_json_decode_error_on_list(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Invalid JSON response should be wrapped in ToolError."""
|
||||
@@ -314,4 +216,4 @@ class TestDockerNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="list")
|
||||
await tool_fn(action="docker", subaction="list")
|
||||
|
||||
90
tests/test_guards.py
Normal file
90
tests/test_guards.py
Normal file
@@ -0,0 +1,90 @@
|
||||
"""Unit tests for unraid_mcp.core.guards."""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp.exceptions import ToolError
|
||||
|
||||
from unraid_mcp.core.guards import gate_destructive_action
|
||||
|
||||
|
||||
DESTRUCTIVE = {"delete", "wipe"}
|
||||
|
||||
|
||||
class TestGateDestructiveAction:
|
||||
"""gate_destructive_action raises ToolError or elicits based on state."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_non_destructive_action_passes_through(self) -> None:
|
||||
"""Non-destructive actions are never blocked."""
|
||||
await gate_destructive_action(None, "list", DESTRUCTIVE, False, "irrelevant")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_confirm_true_bypasses_elicitation(self) -> None:
|
||||
"""confirm=True skips elicitation entirely."""
|
||||
with patch("unraid_mcp.core.guards.elicit_destructive_confirmation") as mock_elicit:
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, True, "desc")
|
||||
mock_elicit.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_no_ctx_raises_tool_error(self) -> None:
|
||||
"""ctx=None means elicitation returns False → ToolError."""
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, False, "desc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicitation_accepted_does_not_raise(self) -> None:
|
||||
"""When elicitation returns True, no ToolError is raised."""
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
):
|
||||
await gate_destructive_action(object(), "delete", DESTRUCTIVE, False, "desc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicitation_declined_raises_tool_error(self) -> None:
|
||||
"""When elicitation returns False, ToolError is raised."""
|
||||
with (
|
||||
patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=False,
|
||||
) as mock_elicit,
|
||||
pytest.raises(ToolError, match="confirm=True"),
|
||||
):
|
||||
await gate_destructive_action(object(), "delete", DESTRUCTIVE, False, "desc")
|
||||
mock_elicit.assert_called_once()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_string_description_passed_to_elicitation(self) -> None:
|
||||
"""A plain string description is forwarded as-is."""
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
) as mock_elicit:
|
||||
await gate_destructive_action(
|
||||
object(), "delete", DESTRUCTIVE, False, "Delete everything."
|
||||
)
|
||||
_, _, desc = mock_elicit.call_args.args
|
||||
assert desc == "Delete everything."
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dict_description_resolves_by_action(self) -> None:
|
||||
"""A dict description is resolved by action key."""
|
||||
descs = {"delete": "Delete desc.", "wipe": "Wipe desc."}
|
||||
with patch(
|
||||
"unraid_mcp.core.guards.elicit_destructive_confirmation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=True,
|
||||
) as mock_elicit:
|
||||
await gate_destructive_action(object(), "wipe", DESTRUCTIVE, False, descs)
|
||||
_, _, desc = mock_elicit.call_args.args
|
||||
assert desc == "Wipe desc."
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_error_message_contains_action_name(self) -> None:
|
||||
"""ToolError message includes the action name."""
|
||||
with pytest.raises(ToolError, match="'delete'"):
|
||||
await gate_destructive_action(None, "delete", DESTRUCTIVE, False, "desc")
|
||||
@@ -1,36 +1,37 @@
|
||||
"""Tests for unraid_health tool."""
|
||||
"""Tests for health subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.core.utils import safe_display_url
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.health.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestHealthValidation:
|
||||
async def test_invalid_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
async def test_invalid_subaction(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="invalid")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="health", subaction="invalid")
|
||||
|
||||
|
||||
class TestHealthActions:
|
||||
async def test_test_connection(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"online": True}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="test_connection")
|
||||
result = await tool_fn(action="health", subaction="test_connection")
|
||||
assert result["status"] == "connected"
|
||||
assert result["online"] is True
|
||||
assert "latency_ms" in result
|
||||
@@ -45,13 +46,38 @@ class TestHealthActions:
|
||||
},
|
||||
"array": {"state": "STARTED"},
|
||||
"notifications": {"overview": {"unread": {"alert": 0, "warning": 0, "total": 3}}},
|
||||
"docker": {"containers": [{"id": "c1", "state": "running", "status": "Up 2 days"}]},
|
||||
"docker": {"containers": [{"id": "c1", "state": "RUNNING", "status": "Up 2 days"}]},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "healthy"
|
||||
assert "api_latency_ms" in result
|
||||
|
||||
async def test_check_docker_counts_uppercase_states(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""ContainerState enum is UPPERCASE — running/stopped counts must use case-insensitive match."""
|
||||
_mock_graphql.return_value = {
|
||||
"info": {
|
||||
"machineId": "x",
|
||||
"versions": {"core": {"unraid": "7.0"}},
|
||||
"os": {"uptime": 1},
|
||||
},
|
||||
"array": {"state": "STARTED"},
|
||||
"notifications": {"overview": {"unread": {"alert": 0, "warning": 0, "total": 0}}},
|
||||
"docker": {
|
||||
"containers": [
|
||||
{"id": "c1", "state": "RUNNING"},
|
||||
{"id": "c2", "state": "RUNNING"},
|
||||
{"id": "c3", "state": "EXITED"},
|
||||
]
|
||||
},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
svc = result["docker_services"]
|
||||
assert svc["total"] == 3
|
||||
assert svc["running"] == 2
|
||||
assert svc["stopped"] == 1
|
||||
|
||||
async def test_check_warning_on_alerts(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"info": {"machineId": "abc", "versions": {"unraid": "7.2"}, "os": {"uptime": 100}},
|
||||
@@ -60,20 +86,20 @@ class TestHealthActions:
|
||||
"docker": {"containers": []},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "warning"
|
||||
assert any("alert" in i for i in result.get("issues", []))
|
||||
|
||||
async def test_check_no_data(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "unhealthy"
|
||||
|
||||
async def test_check_api_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = Exception("Connection refused")
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
assert result["status"] == "unhealthy"
|
||||
assert "Connection refused" in result["error"]
|
||||
|
||||
@@ -86,56 +112,306 @@ class TestHealthActions:
|
||||
"docker": {"containers": []},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="check")
|
||||
result = await tool_fn(action="health", subaction="check")
|
||||
# Missing info escalates to "degraded"; alerts only escalate to "warning"
|
||||
# Severity should stay at "degraded" (not downgrade to "warning")
|
||||
assert result["status"] == "degraded"
|
||||
|
||||
async def test_diagnose_wraps_exception(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""When _diagnose_subscriptions raises, tool wraps in ToolError."""
|
||||
tool_fn = _make_tool()
|
||||
with (
|
||||
patch(
|
||||
"unraid_mcp.tools.health._diagnose_subscriptions",
|
||||
side_effect=RuntimeError("broken"),
|
||||
),
|
||||
pytest.raises(ToolError, match="broken"),
|
||||
):
|
||||
await tool_fn(action="diagnose")
|
||||
|
||||
async def test_diagnose_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Diagnose returns subscription status when modules are available."""
|
||||
"""Diagnose returns subscription status."""
|
||||
tool_fn = _make_tool()
|
||||
mock_status = {
|
||||
"cpu_sub": {"runtime": {"connection_state": "connected", "last_error": None}},
|
||||
}
|
||||
with patch("unraid_mcp.tools.health._diagnose_subscriptions", return_value=mock_status):
|
||||
result = await tool_fn(action="diagnose")
|
||||
assert "cpu_sub" in result
|
||||
mock_status = {"cpu": {"connection_state": "connected"}}
|
||||
mock_manager = MagicMock()
|
||||
mock_manager.get_subscription_status = AsyncMock(return_value=mock_status)
|
||||
mock_manager.auto_start_enabled = True
|
||||
mock_manager.max_reconnect_attempts = 3
|
||||
mock_manager.subscription_configs = {}
|
||||
mock_manager.active_subscriptions = {}
|
||||
mock_manager.resource_data = {}
|
||||
|
||||
async def test_diagnose_import_error_internal(self) -> None:
|
||||
"""_diagnose_subscriptions catches ImportError and returns error dict."""
|
||||
import sys
|
||||
mock_cache = MagicMock()
|
||||
mock_cache.statistics.return_value = MagicMock(call_tool=None)
|
||||
mock_error = MagicMock()
|
||||
mock_error.get_error_stats.return_value = {}
|
||||
|
||||
from unraid_mcp.tools.health import _diagnose_subscriptions
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager.subscription_manager", mock_manager),
|
||||
patch("unraid_mcp.subscriptions.resources.ensure_subscriptions_started", AsyncMock()),
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.utils._analyze_subscription_status",
|
||||
return_value=(0, []),
|
||||
),
|
||||
patch("unraid_mcp.server.cache_middleware", mock_cache),
|
||||
patch("unraid_mcp.server.error_middleware", mock_error),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="diagnose")
|
||||
assert "subscriptions" in result
|
||||
assert "summary" in result
|
||||
assert "cache" in result
|
||||
assert "errors" in result
|
||||
|
||||
# Remove cached subscription modules so the import is re-triggered
|
||||
cached = {k: v for k, v in sys.modules.items() if "unraid_mcp.subscriptions" in k}
|
||||
for k in cached:
|
||||
del sys.modules[k]
|
||||
async def test_diagnose_wraps_exception(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""When subscription manager raises, tool wraps in ToolError."""
|
||||
tool_fn = _make_tool()
|
||||
mock_manager = MagicMock()
|
||||
mock_manager.get_subscription_status = AsyncMock(side_effect=RuntimeError("broken"))
|
||||
|
||||
try:
|
||||
# Replace the modules with objects that raise ImportError on access
|
||||
with patch.dict(
|
||||
sys.modules,
|
||||
{
|
||||
"unraid_mcp.subscriptions": None,
|
||||
"unraid_mcp.subscriptions.manager": None,
|
||||
"unraid_mcp.subscriptions.resources": None,
|
||||
},
|
||||
):
|
||||
result = await _diagnose_subscriptions()
|
||||
assert "error" in result
|
||||
finally:
|
||||
# Restore cached modules
|
||||
sys.modules.update(cached)
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager.subscription_manager", mock_manager),
|
||||
patch("unraid_mcp.subscriptions.resources.ensure_subscriptions_started", AsyncMock()),
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.utils._analyze_subscription_status",
|
||||
return_value=(0, []),
|
||||
),
|
||||
pytest.raises(ToolError, match="Failed to execute health/diagnose"),
|
||||
):
|
||||
await tool_fn(action="health", subaction="diagnose")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _safe_display_url — URL redaction helper
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSafeDisplayUrl:
|
||||
"""Verify that safe_display_url strips credentials/path and preserves scheme+host+port."""
|
||||
|
||||
def test_none_returns_none(self) -> None:
|
||||
assert safe_display_url(None) is None
|
||||
|
||||
def test_empty_string_returns_none(self) -> None:
|
||||
assert safe_display_url("") is None
|
||||
|
||||
def test_simple_url_scheme_and_host(self) -> None:
|
||||
assert safe_display_url("https://unraid.local/graphql") == "https://unraid.local"
|
||||
|
||||
def test_preserves_port(self) -> None:
|
||||
assert safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337"
|
||||
|
||||
def test_strips_path(self) -> None:
|
||||
result = safe_display_url("http://unraid.local/some/deep/path?query=1")
|
||||
assert result is not None
|
||||
assert "path" not in result
|
||||
assert "query" not in result
|
||||
|
||||
def test_strips_credentials(self) -> None:
|
||||
result = safe_display_url("https://user:password@unraid.local/graphql")
|
||||
assert result is not None
|
||||
assert "user" not in result
|
||||
assert "password" not in result
|
||||
assert result == "https://unraid.local"
|
||||
|
||||
def test_strips_query_params(self) -> None:
|
||||
result = safe_display_url("http://host.local?token=abc&key=xyz")
|
||||
assert result is not None
|
||||
assert "token" not in result
|
||||
assert "abc" not in result
|
||||
|
||||
def test_http_scheme_preserved(self) -> None:
|
||||
result = safe_display_url("http://10.0.0.1:8080/api")
|
||||
assert result == "http://10.0.0.1:8080"
|
||||
|
||||
def test_tailscale_url(self) -> None:
|
||||
result = safe_display_url("https://100.118.209.1:31337/graphql")
|
||||
assert result == "https://100.118.209.1:31337"
|
||||
|
||||
def test_malformed_ipv6_url_returns_unparseable(self) -> None:
|
||||
"""Malformed IPv6 brackets in netloc cause urlparse.hostname to raise ValueError."""
|
||||
result = safe_display_url("https://[invalid")
|
||||
assert result == "<unparseable>"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_action_calls_elicitation() -> None:
|
||||
"""setup subaction triggers elicit_and_configure when no credentials exist."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_elicit,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert mock_elicit.called
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_action_returns_declined_message() -> None:
|
||||
"""setup subaction with declined elicitation returns appropriate message."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=False)),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert (
|
||||
"not configured" in result.lower()
|
||||
or "declined" in result.lower()
|
||||
or "cancel" in result.lower()
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_already_configured_and_working_no_reset() -> None:
|
||||
"""setup returns early when credentials exist, connection works, and user declines reset."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=False),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "already configured" in result.lower()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_already_configured_user_confirms_reset() -> None:
|
||||
"""setup proceeds with elicitation when credentials exist but user confirms reset."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=True),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_called_once()
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_credentials_exist_but_connection_fails_user_confirms() -> None:
|
||||
"""setup prompts for confirmation even on failed probe, then reconfigures if confirmed."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(side_effect=Exception("connection refused")),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=True),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=True)
|
||||
) as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_called_once()
|
||||
assert "configured" in result.lower() or "success" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_credentials_exist_connection_fails_user_declines() -> None:
|
||||
"""setup returns 'no changes' when credentials exist (even with failed probe) and user declines."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(side_effect=Exception("connection refused")),
|
||||
),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.elicit_reset_confirmation",
|
||||
new=AsyncMock(return_value=False),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_ctx_none_already_configured_returns_no_changes() -> None:
|
||||
"""When ctx=None and credentials are working, setup returns 'already configured' gracefully."""
|
||||
tool_fn = _make_tool()
|
||||
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = True
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch(
|
||||
"unraid_mcp.tools.unraid.make_graphql_request",
|
||||
new=AsyncMock(return_value={"online": True}),
|
||||
),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure") as mock_configure,
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=None)
|
||||
|
||||
mock_configure.assert_not_called()
|
||||
assert "already configured" in result.lower()
|
||||
assert "no changes" in result.lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_health_setup_declined_message_includes_manual_path() -> None:
|
||||
"""Declined setup message includes the exact credentials file path and variable names."""
|
||||
from unraid_mcp.config.settings import CREDENTIALS_ENV_PATH
|
||||
|
||||
tool_fn = _make_tool()
|
||||
|
||||
real_path_str = str(CREDENTIALS_ENV_PATH)
|
||||
mock_path = MagicMock()
|
||||
mock_path.exists.return_value = False
|
||||
type(mock_path).__str__ = lambda self: real_path_str # type: ignore[method-assign]
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.config.settings.CREDENTIALS_ENV_PATH", mock_path),
|
||||
patch("unraid_mcp.tools.unraid.elicit_and_configure", new=AsyncMock(return_value=False)),
|
||||
):
|
||||
result = await tool_fn(action="health", subaction="setup", ctx=MagicMock())
|
||||
|
||||
assert real_path_str in result
|
||||
assert "UNRAID_API_URL=" in result
|
||||
assert "UNRAID_API_KEY=" in result
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_info tool."""
|
||||
"""Tests for system subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -7,57 +7,12 @@ import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.info import (
|
||||
_analyze_disk_health,
|
||||
_process_array_status,
|
||||
_process_system_info,
|
||||
)
|
||||
from unraid_mcp.tools.unraid import _analyze_disk_health
|
||||
|
||||
|
||||
# --- Unit tests for helper functions ---
|
||||
|
||||
|
||||
class TestProcessSystemInfo:
|
||||
def test_processes_os_info(self) -> None:
|
||||
raw = {
|
||||
"os": {
|
||||
"distro": "Unraid",
|
||||
"release": "7.2",
|
||||
"platform": "linux",
|
||||
"arch": "x86_64",
|
||||
"hostname": "tower",
|
||||
"uptime": 3600,
|
||||
},
|
||||
"cpu": {"manufacturer": "AMD", "brand": "Ryzen", "cores": 8, "threads": 16},
|
||||
}
|
||||
result = _process_system_info(raw)
|
||||
assert "summary" in result
|
||||
assert "details" in result
|
||||
assert result["summary"]["hostname"] == "tower"
|
||||
assert "AMD" in result["summary"]["cpu"]
|
||||
|
||||
def test_handles_missing_fields(self) -> None:
|
||||
result = _process_system_info({})
|
||||
assert result["summary"] == {"memory_summary": "Memory information not available."}
|
||||
|
||||
def test_processes_memory_layout(self) -> None:
|
||||
raw = {
|
||||
"memory": {
|
||||
"layout": [
|
||||
{
|
||||
"bank": "0",
|
||||
"type": "DDR4",
|
||||
"clockSpeed": 3200,
|
||||
"manufacturer": "G.Skill",
|
||||
"partNum": "XYZ",
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
result = _process_system_info(raw)
|
||||
assert len(result["summary"]["memory_layout_details"]) == 1
|
||||
|
||||
|
||||
class TestAnalyzeDiskHealth:
|
||||
def test_counts_healthy_disks(self) -> None:
|
||||
disks = [{"status": "DISK_OK"}, {"status": "DISK_OK"}]
|
||||
@@ -98,51 +53,17 @@ class TestAnalyzeDiskHealth:
|
||||
assert result["healthy"] == 0
|
||||
|
||||
|
||||
class TestProcessArrayStatus:
|
||||
def test_basic_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"capacity": {"kilobytes": {"free": "1048576", "used": "524288", "total": "1572864"}},
|
||||
"parities": [{"status": "DISK_OK"}],
|
||||
"disks": [{"status": "DISK_OK"}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["state"] == "STARTED"
|
||||
assert result["summary"]["overall_health"] == "HEALTHY"
|
||||
|
||||
def test_critical_disk_threshold_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"parities": [],
|
||||
"disks": [{"status": "DISK_OK", "critical": 55}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["overall_health"] == "CRITICAL"
|
||||
|
||||
def test_degraded_array(self) -> None:
|
||||
raw = {
|
||||
"state": "STARTED",
|
||||
"parities": [],
|
||||
"disks": [{"status": "DISK_NP"}],
|
||||
"caches": [],
|
||||
}
|
||||
result = _process_array_status(raw)
|
||||
assert result["summary"]["overall_health"] == "DEGRADED"
|
||||
|
||||
|
||||
# --- Integration tests for the tool function ---
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.info.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestUnraidInfoTool:
|
||||
@@ -160,47 +81,67 @@ class TestUnraidInfoTool:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="overview")
|
||||
result = await tool_fn(action="system", subaction="overview")
|
||||
assert "summary" in result
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
async def test_ups_device_requires_device_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="device_id is required"):
|
||||
await tool_fn(action="ups_device")
|
||||
await tool_fn(action="system", subaction="ups_device")
|
||||
|
||||
async def test_network_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"network": {"id": "net:1", "accessUrls": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="network")
|
||||
assert result["id"] == "net:1"
|
||||
|
||||
async def test_connect_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"connect": {"status": "connected", "sandbox": False, "flashGuid": "abc123"}
|
||||
"servers": [
|
||||
{
|
||||
"id": "s:1",
|
||||
"name": "tootie",
|
||||
"status": "ONLINE",
|
||||
"lanip": "10.1.0.2",
|
||||
"wanip": "",
|
||||
"localurl": "http://10.1.0.2:6969",
|
||||
"remoteurl": "",
|
||||
}
|
||||
],
|
||||
"vars": {
|
||||
"id": "v:1",
|
||||
"port": 6969,
|
||||
"portssl": 31337,
|
||||
"localTld": "local",
|
||||
"useSsl": None,
|
||||
},
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="connect")
|
||||
assert result["status"] == "connected"
|
||||
result = await tool_fn(action="system", subaction="network")
|
||||
assert "accessUrls" in result
|
||||
assert result["httpPort"] == 6969
|
||||
assert result["httpsPort"] == 31337
|
||||
assert any(u["type"] == "LAN" and u["ipv4"] == "10.1.0.2" for u in result["accessUrls"])
|
||||
|
||||
async def test_connect_action_raises_tool_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction 'connect'"):
|
||||
await tool_fn(action="system", subaction="connect")
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("unexpected")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="unexpected"):
|
||||
await tool_fn(action="online")
|
||||
with pytest.raises(ToolError, match="Failed to execute system/online"):
|
||||
await tool_fn(action="system", subaction="online")
|
||||
|
||||
async def test_metrics(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"metrics": {"cpu": {"used": 25.5}, "memory": {"used": 8192, "total": 32768}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="metrics")
|
||||
result = await tool_fn(action="system", subaction="metrics")
|
||||
assert result["cpu"]["used"] == 25.5
|
||||
|
||||
async def test_services(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"services": [{"name": "docker", "state": "running"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="services")
|
||||
result = await tool_fn(action="system", subaction="services")
|
||||
assert "services" in result
|
||||
assert len(result["services"]) == 1
|
||||
assert result["services"][0]["name"] == "docker"
|
||||
|
||||
@@ -209,14 +150,14 @@ class TestUnraidInfoTool:
|
||||
"settings": {"unified": {"values": {"timezone": "US/Eastern"}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="settings")
|
||||
result = await tool_fn(action="system", subaction="settings")
|
||||
assert result["timezone"] == "US/Eastern"
|
||||
|
||||
async def test_settings_non_dict_values(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Settings values that are not a dict should be wrapped in {'raw': ...}."""
|
||||
_mock_graphql.return_value = {"settings": {"unified": {"values": "raw_string"}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="settings")
|
||||
result = await tool_fn(action="system", subaction="settings")
|
||||
assert result == {"raw": "raw_string"}
|
||||
|
||||
async def test_servers(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -224,7 +165,8 @@ class TestUnraidInfoTool:
|
||||
"servers": [{"id": "s:1", "name": "tower", "status": "online"}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="servers")
|
||||
result = await tool_fn(action="system", subaction="servers")
|
||||
assert "servers" in result
|
||||
assert len(result["servers"]) == 1
|
||||
assert result["servers"][0]["name"] == "tower"
|
||||
|
||||
@@ -239,7 +181,7 @@ class TestUnraidInfoTool:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="flash")
|
||||
result = await tool_fn(action="system", subaction="flash")
|
||||
assert result["product"] == "SanDisk"
|
||||
|
||||
async def test_ups_devices(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -247,7 +189,8 @@ class TestUnraidInfoTool:
|
||||
"upsDevices": [{"id": "ups:1", "model": "APC", "status": "online", "charge": 100}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="ups_devices")
|
||||
result = await tool_fn(action="system", subaction="ups_devices")
|
||||
assert "ups_devices" in result
|
||||
assert len(result["ups_devices"]) == 1
|
||||
assert result["ups_devices"][0]["model"] == "APC"
|
||||
|
||||
@@ -260,7 +203,7 @@ class TestInfoNetworkErrors:
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 401: Unauthorized")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="401"):
|
||||
await tool_fn(action="overview")
|
||||
await tool_fn(action="system", subaction="overview")
|
||||
|
||||
async def test_overview_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Connection refused should propagate as ToolError."""
|
||||
@@ -269,7 +212,7 @@ class TestInfoNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="overview")
|
||||
await tool_fn(action="system", subaction="overview")
|
||||
|
||||
async def test_network_json_decode_error(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Invalid JSON from API should propagate as ToolError."""
|
||||
@@ -278,4 +221,17 @@ class TestInfoNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="network")
|
||||
await tool_fn(action="system", subaction="network")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regression: removed actions must not be valid subactions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize("subaction", ["update_server", "update_ssh"])
|
||||
async def test_removed_info_subactions_are_invalid(subaction: str) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="system", subaction=subaction)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_keys tool."""
|
||||
"""Tests for key subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,39 +11,39 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestKeysValidation:
|
||||
async def test_delete_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete", key_id="k:1")
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action="key", subaction="delete", key_id="k:1")
|
||||
|
||||
async def test_get_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="get")
|
||||
await tool_fn(action="key", subaction="get")
|
||||
|
||||
async def test_create_requires_name(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="name"):
|
||||
await tool_fn(action="create")
|
||||
await tool_fn(action="key", subaction="create")
|
||||
|
||||
async def test_update_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="update")
|
||||
await tool_fn(action="key", subaction="update")
|
||||
|
||||
async def test_delete_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="delete", confirm=True)
|
||||
await tool_fn(action="key", subaction="delete", confirm=True)
|
||||
|
||||
|
||||
class TestKeysActions:
|
||||
@@ -52,7 +52,7 @@ class TestKeysActions:
|
||||
"apiKeys": [{"id": "k:1", "name": "mcp-key", "roles": ["admin"]}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="key", subaction="list")
|
||||
assert len(result["keys"]) == 1
|
||||
|
||||
async def test_get(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -60,45 +60,77 @@ class TestKeysActions:
|
||||
"apiKey": {"id": "k:1", "name": "mcp-key", "roles": ["admin"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="get", key_id="k:1")
|
||||
result = await tool_fn(action="key", subaction="get", key_id="k:1")
|
||||
assert result["name"] == "mcp-key"
|
||||
|
||||
async def test_create(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"createApiKey": {"id": "k:new", "name": "new-key", "key": "secret123", "roles": []}
|
||||
"apiKey": {
|
||||
"create": {"id": "k:new", "name": "new-key", "key": "secret123", "roles": []}
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="create", name="new-key")
|
||||
result = await tool_fn(action="key", subaction="create", name="new-key")
|
||||
assert result["success"] is True
|
||||
assert result["key"]["name"] == "new-key"
|
||||
|
||||
async def test_create_with_roles(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"createApiKey": {
|
||||
"id": "k:new",
|
||||
"name": "admin-key",
|
||||
"key": "secret",
|
||||
"roles": ["admin"],
|
||||
"apiKey": {
|
||||
"create": {
|
||||
"id": "k:new",
|
||||
"name": "admin-key",
|
||||
"key": "secret",
|
||||
"roles": ["admin"],
|
||||
}
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="create", name="admin-key", roles=["admin"])
|
||||
result = await tool_fn(action="key", subaction="create", name="admin-key", roles=["admin"])
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_update(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"updateApiKey": {"id": "k:1", "name": "renamed", "roles": []}}
|
||||
_mock_graphql.return_value = {
|
||||
"apiKey": {"update": {"id": "k:1", "name": "renamed", "roles": []}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="update", key_id="k:1", name="renamed")
|
||||
result = await tool_fn(action="key", subaction="update", key_id="k:1", name="renamed")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"deleteApiKeys": True}
|
||||
_mock_graphql.return_value = {"apiKey": {"delete": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete", key_id="k:1", confirm=True)
|
||||
result = await tool_fn(action="key", subaction="delete", key_id="k:1", confirm=True)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("connection lost")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="connection lost"):
|
||||
await tool_fn(action="list")
|
||||
with pytest.raises(ToolError, match="Failed to execute key/list"):
|
||||
await tool_fn(action="key", subaction="list")
|
||||
|
||||
async def test_add_role_requires_key_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="key_id"):
|
||||
await tool_fn(action="key", subaction="add_role", roles=["VIEWER"])
|
||||
|
||||
async def test_add_role_requires_role(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="role"):
|
||||
await tool_fn(action="key", subaction="add_role", key_id="abc:local")
|
||||
|
||||
async def test_add_role_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"addRole": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="key", subaction="add_role", key_id="abc:local", roles=["VIEWER"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_remove_role_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"apiKey": {"removeRole": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="key", subaction="remove_role", key_id="abc:local", roles=["VIEWER"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
131
tests/test_live.py
Normal file
131
tests/test_live.py
Normal file
@@ -0,0 +1,131 @@
|
||||
# tests/test_live.py
|
||||
"""Tests for live subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_subscribe_once():
|
||||
with patch("unraid_mcp.subscriptions.snapshot.subscribe_once") as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_subscribe_collect():
|
||||
with patch("unraid_mcp.subscriptions.snapshot.subscribe_collect") as m:
|
||||
yield m
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cpu_returns_snapshot(_mock_subscribe_once):
|
||||
_mock_subscribe_once.return_value = {"systemMetricsCpu": {"percentTotal": 23.5, "cpus": []}}
|
||||
result = await _make_tool()(action="live", subaction="cpu")
|
||||
assert result["success"] is True
|
||||
assert result["data"]["systemMetricsCpu"]["percentTotal"] == 23.5
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_memory_returns_snapshot(_mock_subscribe_once):
|
||||
_mock_subscribe_once.return_value = {
|
||||
"systemMetricsMemory": {"total": 32000000000, "used": 10000000000, "percentTotal": 31.2}
|
||||
}
|
||||
result = await _make_tool()(action="live", subaction="memory")
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_requires_path(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = []
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="path"):
|
||||
await _make_tool()(action="live", subaction="log_tail")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_with_path(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = [
|
||||
{"logFile": {"path": "/var/log/syslog", "content": "line1\nline2", "totalLines": 2}}
|
||||
]
|
||||
result = await _make_tool()(
|
||||
action="live", subaction="log_tail", path="/var/log/syslog", collect_for=1.0
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["event_count"] == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_notification_feed_collects_events(_mock_subscribe_collect):
|
||||
_mock_subscribe_collect.return_value = [
|
||||
{"notificationAdded": {"id": "1", "title": "Alert"}},
|
||||
{"notificationAdded": {"id": "2", "title": "Info"}},
|
||||
]
|
||||
result = await _make_tool()(action="live", subaction="notification_feed", collect_for=2.0)
|
||||
assert result["event_count"] == 2
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_invalid_subaction_raises():
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await _make_tool()(action="live", subaction="nonexistent")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_snapshot_propagates_tool_error(_mock_subscribe_once):
|
||||
"""Non-event-driven (streaming) actions still propagate timeout as ToolError."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription timed out after 10s")
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await _make_tool()(action="live", subaction="cpu")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_event_driven_timeout_returns_no_recent_events(_mock_subscribe_once):
|
||||
"""Event-driven subscriptions return a graceful no_recent_events response on timeout."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription timed out after 10s")
|
||||
result = await _make_tool()(action="live", subaction="notifications_overview")
|
||||
assert result["success"] is True
|
||||
assert result["status"] == "no_recent_events"
|
||||
assert "No events received" in result["message"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_event_driven_non_timeout_error_propagates(_mock_subscribe_once):
|
||||
"""Non-timeout ToolErrors from event-driven subscriptions still propagate."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = ToolError("Subscription auth failed")
|
||||
with pytest.raises(ToolError, match="auth failed"):
|
||||
await _make_tool()(action="live", subaction="owner")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_log_tail_rejects_invalid_path(_mock_subscribe_collect):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="must start with"):
|
||||
await _make_tool()(action="live", subaction="log_tail", path="/etc/shadow")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_snapshot_wraps_bare_exception(_mock_subscribe_once):
|
||||
"""Bare exceptions from subscribe_once are wrapped in ToolError by tool_error_handler."""
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
_mock_subscribe_once.side_effect = RuntimeError("WebSocket connection refused")
|
||||
with pytest.raises(ToolError):
|
||||
await _make_tool()(action="live", subaction="cpu")
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_notifications tool."""
|
||||
"""Tests for notification subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,43 +11,44 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(
|
||||
"unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn(
|
||||
"unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"
|
||||
)
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestNotificationsValidation:
|
||||
async def test_delete_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete", notification_id="n:1", notification_type="UNREAD")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n:1",
|
||||
notification_type="UNREAD",
|
||||
)
|
||||
|
||||
async def test_delete_archived_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete_archived")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="notification", subaction="delete_archived")
|
||||
|
||||
async def test_create_requires_fields(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires title"):
|
||||
await tool_fn(action="create")
|
||||
await tool_fn(action="notification", subaction="create")
|
||||
|
||||
async def test_archive_requires_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_id"):
|
||||
await tool_fn(action="archive")
|
||||
await tool_fn(action="notification", subaction="archive")
|
||||
|
||||
async def test_delete_requires_id_and_type(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires notification_id"):
|
||||
await tool_fn(action="delete", confirm=True)
|
||||
await tool_fn(action="notification", subaction="delete", confirm=True)
|
||||
|
||||
|
||||
class TestNotificationsActions:
|
||||
@@ -61,7 +62,7 @@ class TestNotificationsActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="overview")
|
||||
result = await tool_fn(action="notification", subaction="overview")
|
||||
assert result["unread"]["total"] == 7
|
||||
|
||||
async def test_list(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -69,26 +70,17 @@ class TestNotificationsActions:
|
||||
"notifications": {"list": [{"id": "n:1", "title": "Test", "importance": "INFO"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="notification", subaction="list")
|
||||
assert len(result["notifications"]) == 1
|
||||
|
||||
async def test_warnings(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"notifications": {"warningsAndAlerts": [{"id": "n:1", "importance": "WARNING"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="warnings")
|
||||
assert len(result["warnings"]) == 1
|
||||
|
||||
async def test_create(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"notifications": {
|
||||
"createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"}
|
||||
}
|
||||
"createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create",
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="Test",
|
||||
subject="Test Subject",
|
||||
description="Test Desc",
|
||||
@@ -97,16 +89,22 @@ class TestNotificationsActions:
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_archive_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"notifications": {"archiveNotification": True}}
|
||||
_mock_graphql.return_value = {"archiveNotification": {"id": "n:1"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="archive", notification_id="n:1")
|
||||
result = await tool_fn(action="notification", subaction="archive", notification_id="n:1")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete_with_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"notifications": {"deleteNotification": True}}
|
||||
_mock_graphql.return_value = {
|
||||
"deleteNotification": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="delete",
|
||||
action="notification",
|
||||
subaction="delete",
|
||||
notification_id="n:1",
|
||||
notification_type="unread",
|
||||
confirm=True,
|
||||
@@ -114,24 +112,33 @@ class TestNotificationsActions:
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_archive_all(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"notifications": {"archiveAll": True}}
|
||||
_mock_graphql.return_value = {
|
||||
"archiveAll": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 1},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="archive_all")
|
||||
result = await tool_fn(action="notification", subaction="archive_all")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unread_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"notifications": {"unreadNotification": True}}
|
||||
async def test_mark_unread_notification(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"unreadNotification": {"id": "n:1"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="unread", notification_id="n:1")
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="mark_unread", notification_id="n:1"
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "unread"
|
||||
assert result["subaction"] == "mark_unread"
|
||||
|
||||
async def test_list_with_importance_filter(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"notifications": {"list": [{"id": "n:1", "title": "Alert", "importance": "WARNING"}]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list", importance="warning", limit=10, offset=5)
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="list", importance="warning", limit=10, offset=5
|
||||
)
|
||||
assert len(result["notifications"]) == 1
|
||||
call_args = _mock_graphql.call_args
|
||||
filter_var = call_args[0][1]["filter"]
|
||||
@@ -140,14 +147,182 @@ class TestNotificationsActions:
|
||||
assert filter_var["offset"] == 5
|
||||
|
||||
async def test_delete_archived(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"notifications": {"deleteArchivedNotifications": True}}
|
||||
_mock_graphql.return_value = {
|
||||
"deleteArchivedNotifications": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete_archived", confirm=True)
|
||||
result = await tool_fn(action="notification", subaction="delete_archived", confirm=True)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "delete_archived"
|
||||
assert result["subaction"] == "delete_archived"
|
||||
|
||||
async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.side_effect = RuntimeError("boom")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="boom"):
|
||||
await tool_fn(action="overview")
|
||||
with pytest.raises(ToolError, match="Failed to execute notification/overview"):
|
||||
await tool_fn(action="notification", subaction="overview")
|
||||
|
||||
|
||||
class TestNotificationsCreateValidation:
|
||||
"""Tests for importance enum and field length validation added in this PR."""
|
||||
|
||||
async def test_invalid_importance_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid importance"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="invalid",
|
||||
)
|
||||
|
||||
async def test_normal_importance_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""NORMAL is not a valid GraphQL NotificationImportance value (INFO/WARNING/ALERT are)."""
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid importance"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="normal",
|
||||
)
|
||||
|
||||
async def test_alert_importance_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "ALERT"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="alert",
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_title_too_long_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="title must be at most 200"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="x" * 201,
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="info",
|
||||
)
|
||||
|
||||
async def test_subject_too_long_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="subject must be at most 500"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="x" * 501,
|
||||
description="D",
|
||||
importance="info",
|
||||
)
|
||||
|
||||
async def test_description_too_long_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="description must be at most 2000"):
|
||||
await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="T",
|
||||
subject="S",
|
||||
description="x" * 2001,
|
||||
importance="info",
|
||||
)
|
||||
|
||||
async def test_title_at_max_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"createNotification": {"id": "n:1", "importance": "INFO"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="notification",
|
||||
subaction="create",
|
||||
title="x" * 200,
|
||||
subject="S",
|
||||
description="D",
|
||||
importance="info",
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
class TestNewNotificationMutations:
|
||||
async def test_archive_many_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"archiveNotifications": {
|
||||
"unread": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
"archive": {"info": 2, "warning": 0, "alert": 0, "total": 2},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="archive_many", notification_ids=["n:1", "n:2"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"ids": ["n:1", "n:2"]}
|
||||
|
||||
async def test_archive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_ids"):
|
||||
await tool_fn(action="notification", subaction="archive_many")
|
||||
|
||||
async def test_unarchive_many_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"unarchiveNotifications": {
|
||||
"unread": {"info": 2, "warning": 0, "alert": 0, "total": 2},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="notification", subaction="unarchive_many", notification_ids=["n:1", "n:2"]
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unarchive_many_requires_ids(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="notification_ids"):
|
||||
await tool_fn(action="notification", subaction="unarchive_many")
|
||||
|
||||
async def test_unarchive_all_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"unarchiveAll": {
|
||||
"unread": {"info": 5, "warning": 1, "alert": 0, "total": 6},
|
||||
"archive": {"info": 0, "warning": 0, "alert": 0, "total": 0},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="notification", subaction="unarchive_all")
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_unarchive_all_with_importance(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Lowercase importance input must be uppercased before being sent to GraphQL."""
|
||||
_mock_graphql.return_value = {
|
||||
"unarchiveAll": {"unread": {"total": 1}, "archive": {"total": 0}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
await tool_fn(action="notification", subaction="unarchive_all", importance="warning")
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"importance": "WARNING"}
|
||||
|
||||
async def test_recalculate_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"recalculateOverview": {
|
||||
"unread": {"info": 3, "warning": 1, "alert": 0, "total": 4},
|
||||
"archive": {"info": 10, "warning": 0, "alert": 0, "total": 10},
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="notification", subaction="recalculate")
|
||||
assert result["success"] is True
|
||||
|
||||
64
tests/test_oidc.py
Normal file
64
tests/test_oidc.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# tests/test_oidc.py
|
||||
"""Tests for oidc subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_providers_returns_list(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"oidcProviders": [
|
||||
{"id": "1:local", "name": "Google", "clientId": "abc", "scopes": ["openid"]}
|
||||
]
|
||||
}
|
||||
result = await _make_tool()(action="oidc", subaction="providers")
|
||||
assert "providers" in result
|
||||
assert len(result["providers"]) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_public_providers(_mock_graphql):
|
||||
_mock_graphql.return_value = {"publicOidcProviders": []}
|
||||
result = await _make_tool()(action="oidc", subaction="public_providers")
|
||||
assert result["providers"] == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_provider_requires_provider_id(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="provider_id"):
|
||||
await _make_tool()(action="oidc", subaction="provider")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_validate_session_requires_token(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="token"):
|
||||
await _make_tool()(action="oidc", subaction="validate_session")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_configuration(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"oidcConfiguration": {"providers": [], "defaultAllowedOrigins": []}
|
||||
}
|
||||
result = await _make_tool()(action="oidc", subaction="configuration")
|
||||
assert result["providers"] == []
|
||||
assert result["defaultAllowedOrigins"] == []
|
||||
63
tests/test_plugins.py
Normal file
63
tests/test_plugins.py
Normal file
@@ -0,0 +1,63 @@
|
||||
# tests/test_plugins.py
|
||||
"""Tests for plugin subactions of the consolidated unraid tool."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql():
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request") as m:
|
||||
yield m
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_returns_plugins(_mock_graphql):
|
||||
_mock_graphql.return_value = {
|
||||
"plugins": [
|
||||
{"name": "my-plugin", "version": "1.0.0", "hasApiModule": True, "hasCliModule": False}
|
||||
]
|
||||
}
|
||||
result = await _make_tool()(action="plugin", subaction="list")
|
||||
assert result["success"] is True
|
||||
assert len(result["data"]["plugins"]) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_requires_names(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="names"):
|
||||
await _make_tool()(action="plugin", subaction="add")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_success(_mock_graphql):
|
||||
_mock_graphql.return_value = {"addPlugin": False} # False = auto-restart triggered
|
||||
result = await _make_tool()(action="plugin", subaction="add", names=["my-plugin"])
|
||||
assert result["success"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_requires_confirm(_mock_graphql):
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await _make_tool()(action="plugin", subaction="remove", names=["my-plugin"], confirm=False)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_remove_with_confirm(_mock_graphql):
|
||||
_mock_graphql.return_value = {"removePlugin": True}
|
||||
result = await _make_tool()(
|
||||
action="plugin", subaction="remove", names=["my-plugin"], confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_rclone tool."""
|
||||
"""Tests for rclone subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,37 +11,36 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.rclone.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("_mock_graphql")
|
||||
class TestRcloneValidation:
|
||||
async def test_delete_requires_confirm(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action="delete_remote", name="gdrive")
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="rclone", subaction="delete_remote", name="gdrive")
|
||||
|
||||
async def test_create_requires_fields(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="requires name"):
|
||||
await tool_fn(action="create_remote")
|
||||
await tool_fn(action="rclone", subaction="create_remote")
|
||||
|
||||
async def test_delete_requires_name(self) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="name is required"):
|
||||
await tool_fn(action="delete_remote", confirm=True)
|
||||
await tool_fn(action="rclone", subaction="delete_remote", confirm=True)
|
||||
|
||||
|
||||
class TestRcloneActions:
|
||||
async def test_list_remotes(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"remotes": [{"name": "gdrive", "type": "drive"}]}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list_remotes")
|
||||
result = await tool_fn(action="rclone", subaction="list_remotes")
|
||||
assert len(result["remotes"]) == 1
|
||||
|
||||
async def test_config_form(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -49,7 +48,7 @@ class TestRcloneActions:
|
||||
"rclone": {"configForm": {"id": "form:1", "dataSchema": {}, "uiSchema": {}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="config_form")
|
||||
result = await tool_fn(action="rclone", subaction="config_form")
|
||||
assert result["id"] == "form:1"
|
||||
|
||||
async def test_config_form_with_provider(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -57,7 +56,7 @@ class TestRcloneActions:
|
||||
"rclone": {"configForm": {"id": "form:s3", "dataSchema": {}, "uiSchema": {}}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="config_form", provider_type="s3")
|
||||
result = await tool_fn(action="rclone", subaction="config_form", provider_type="s3")
|
||||
assert result["id"] == "form:s3"
|
||||
call_args = _mock_graphql.call_args
|
||||
assert call_args[0][1] == {"formOptions": {"providerType": "s3"}}
|
||||
@@ -68,7 +67,8 @@ class TestRcloneActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="newremote",
|
||||
provider_type="s3",
|
||||
config_data={"bucket": "mybucket"},
|
||||
@@ -82,7 +82,8 @@ class TestRcloneActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="create_remote",
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="ftp-remote",
|
||||
provider_type="ftp",
|
||||
config_data={},
|
||||
@@ -92,11 +93,98 @@ class TestRcloneActions:
|
||||
async def test_delete_remote(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="delete_remote", name="gdrive", confirm=True)
|
||||
result = await tool_fn(
|
||||
action="rclone", subaction="delete_remote", name="gdrive", confirm=True
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_delete_remote_failure(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"deleteRCloneRemote": False}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to delete"):
|
||||
await tool_fn(action="delete_remote", name="gdrive", confirm=True)
|
||||
await tool_fn(action="rclone", subaction="delete_remote", name="gdrive", confirm=True)
|
||||
|
||||
|
||||
class TestRcloneConfigDataValidation:
|
||||
"""Tests for _validate_config_data security guards."""
|
||||
|
||||
async def test_path_traversal_in_key_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disallowed characters"):
|
||||
await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"../evil": "value"},
|
||||
)
|
||||
|
||||
async def test_shell_metachar_in_key_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disallowed characters"):
|
||||
await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"key;rm": "value"},
|
||||
)
|
||||
|
||||
async def test_too_many_keys_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="max 50"):
|
||||
await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={f"key{i}": "v" for i in range(51)},
|
||||
)
|
||||
|
||||
async def test_dict_value_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="string, number, or boolean"):
|
||||
await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"nested": {"key": "val"}},
|
||||
)
|
||||
|
||||
async def test_value_too_long_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="exceeds max length"):
|
||||
await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"key": "x" * 4097},
|
||||
)
|
||||
|
||||
async def test_boolean_value_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"rclone": {"createRCloneRemote": {"name": "r", "type": "s3"}}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="s3",
|
||||
config_data={"use_path_style": True},
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
async def test_int_value_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"rclone": {"createRCloneRemote": {"name": "r", "type": "sftp"}}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="rclone",
|
||||
subaction="create_remote",
|
||||
name="r",
|
||||
provider_type="sftp",
|
||||
config_data={"port": 22},
|
||||
)
|
||||
assert result["success"] is True
|
||||
|
||||
155
tests/test_resources.py
Normal file
155
tests/test_resources.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""Tests for MCP subscription resources."""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from unraid_mcp.subscriptions.queries import SNAPSHOT_ACTIONS
|
||||
from unraid_mcp.subscriptions.resources import register_subscription_resources
|
||||
|
||||
|
||||
def _make_resources():
|
||||
"""Register resources on a test FastMCP instance and return it."""
|
||||
test_mcp = FastMCP("test")
|
||||
register_subscription_resources(test_mcp)
|
||||
return test_mcp
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_ensure_started():
|
||||
with patch(
|
||||
"unraid_mcp.subscriptions.resources.ensure_subscriptions_started",
|
||||
new_callable=AsyncMock,
|
||||
) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
class TestLiveResourcesUseManagerCache:
|
||||
"""All live resources must read from the persistent SubscriptionManager cache."""
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_cached_data(self, action: str) -> None:
|
||||
cached = {"systemMetricsCpu": {"percentTotal": 12.5}}
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=cached)
|
||||
mcp = _make_resources()
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == cached
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_connecting_when_no_cache_and_no_error(
|
||||
self, action: str
|
||||
) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mcp = _make_resources()
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert parsed["status"] == "connecting"
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_resource_returns_error_status_on_permanent_failure(self, action: str) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {action: "WebSocket auth failed"}
|
||||
mcp = _make_resources()
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert parsed["status"] == "error"
|
||||
assert "auth failed" in parsed["message"]
|
||||
|
||||
|
||||
class TestSnapshotSubscriptionsRegistered:
|
||||
"""All SNAPSHOT_ACTIONS must be registered in the SubscriptionManager with auto_start=True."""
|
||||
|
||||
def test_all_snapshot_actions_in_configs(self) -> None:
|
||||
from unraid_mcp.subscriptions.manager import subscription_manager
|
||||
|
||||
for action in SNAPSHOT_ACTIONS:
|
||||
assert action in subscription_manager.subscription_configs, (
|
||||
f"'{action}' not registered in subscription_configs"
|
||||
)
|
||||
|
||||
def test_all_snapshot_actions_autostart(self) -> None:
|
||||
from unraid_mcp.subscriptions.manager import subscription_manager
|
||||
|
||||
for action in SNAPSHOT_ACTIONS:
|
||||
config = subscription_manager.subscription_configs[action]
|
||||
assert config.get("auto_start") is True, (
|
||||
f"'{action}' missing auto_start=True in subscription_configs"
|
||||
)
|
||||
|
||||
|
||||
class TestLogsStreamResource:
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_logs_stream_no_data(self) -> None:
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mcp = _make_resources()
|
||||
local_provider = mcp.providers[0]
|
||||
resource = local_provider._components["resource:unraid://logs/stream@"]
|
||||
result = await resource.fn()
|
||||
parsed = json.loads(result)
|
||||
assert "status" in parsed
|
||||
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_logs_stream_returns_data_with_empty_dict(self) -> None:
|
||||
"""Empty dict cache hit must return data, not 'connecting' status."""
|
||||
with patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr:
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value={})
|
||||
mcp = _make_resources()
|
||||
local_provider = mcp.providers[0]
|
||||
resource = local_provider._components["resource:unraid://logs/stream@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == {}
|
||||
|
||||
|
||||
class TestAutoStartDisabledFallback:
|
||||
"""When auto_start is disabled, resources fall back to on-demand subscribe_once."""
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_fallback_returns_subscribe_once_data(self, action: str) -> None:
|
||||
fallback_data = {"systemMetricsCpu": {"percentTotal": 42.0}}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr,
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.resources.subscribe_once",
|
||||
new=AsyncMock(return_value=fallback_data),
|
||||
),
|
||||
):
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mock_mgr.auto_start_enabled = False
|
||||
mcp = _make_resources()
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result) == fallback_data
|
||||
|
||||
@pytest.mark.parametrize("action", list(SNAPSHOT_ACTIONS.keys()))
|
||||
@pytest.mark.usefixtures("_mock_ensure_started")
|
||||
async def test_fallback_failure_returns_connecting(self, action: str) -> None:
|
||||
"""When on-demand fallback itself fails, still return 'connecting' status."""
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.resources.subscription_manager") as mock_mgr,
|
||||
patch(
|
||||
"unraid_mcp.subscriptions.resources.subscribe_once",
|
||||
new=AsyncMock(side_effect=Exception("WebSocket failed")),
|
||||
),
|
||||
):
|
||||
mock_mgr.get_resource_data = AsyncMock(return_value=None)
|
||||
mock_mgr.last_error = {}
|
||||
mock_mgr.auto_start_enabled = False
|
||||
mcp = _make_resources()
|
||||
resource = mcp.providers[0]._components[f"resource:unraid://live/{action}@"]
|
||||
result = await resource.fn()
|
||||
assert json.loads(result)["status"] == "connecting"
|
||||
116
tests/test_settings.py
Normal file
116
tests/test_settings.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""Tests for the setting subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regression: removed subactions must raise Invalid subaction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.parametrize(
|
||||
"subaction",
|
||||
[
|
||||
"update_temperature",
|
||||
"update_time",
|
||||
"update_api",
|
||||
"connect_sign_in",
|
||||
"connect_sign_out",
|
||||
"setup_remote_access",
|
||||
"enable_dynamic_remote_access",
|
||||
"update_ssh",
|
||||
],
|
||||
)
|
||||
async def test_removed_settings_subactions_are_invalid(subaction: str) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="setting", subaction=subaction)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Validation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSettingsValidation:
|
||||
"""Tests for subaction validation and destructive guard."""
|
||||
|
||||
async def test_invalid_subaction(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="setting", subaction="nonexistent_action")
|
||||
|
||||
async def test_destructive_configure_ups_requires_confirm(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="confirm=True"):
|
||||
await tool_fn(action="setting", subaction="configure_ups", ups_config={"mode": "slave"})
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# update
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSettingsUpdate:
|
||||
"""Tests for update subaction."""
|
||||
|
||||
async def test_update_requires_settings_input(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="settings_input is required"):
|
||||
await tool_fn(action="setting", subaction="update")
|
||||
|
||||
async def test_update_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {
|
||||
"updateSettings": {"restartRequired": False, "values": {}, "warnings": []}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="setting", subaction="update", settings_input={"shareCount": 5}
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["subaction"] == "update"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# configure_ups
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestUpsConfig:
|
||||
"""Tests for configure_ups subaction."""
|
||||
|
||||
async def test_configure_ups_requires_ups_config(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="ups_config is required"):
|
||||
await tool_fn(action="setting", subaction="configure_ups", confirm=True)
|
||||
|
||||
async def test_configure_ups_success(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"configureUps": True}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="setting",
|
||||
subaction="configure_ups",
|
||||
confirm=True,
|
||||
ups_config={"mode": "master", "cable": "usb"},
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert result["subaction"] == "configure_ups"
|
||||
527
tests/test_setup.py
Normal file
527
tests/test_setup.py
Normal file
@@ -0,0 +1,527 @@
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from unraid_mcp.core.exceptions import CredentialsNotConfiguredError, ToolError
|
||||
|
||||
|
||||
def test_credentials_not_configured_error_exists():
|
||||
err = CredentialsNotConfiguredError()
|
||||
assert str(err) == "Unraid credentials are not configured."
|
||||
|
||||
|
||||
def test_credentials_not_configured_error_is_exception():
|
||||
"""CredentialsNotConfiguredError must be catchable as a plain Exception."""
|
||||
with pytest.raises(Exception):
|
||||
raise CredentialsNotConfiguredError()
|
||||
|
||||
|
||||
def test_credentials_not_configured_error_is_not_tool_error():
|
||||
"""CredentialsNotConfiguredError must NOT be a ToolError — it bypasses MCP protocol error handling."""
|
||||
assert not issubclass(CredentialsNotConfiguredError, ToolError)
|
||||
|
||||
|
||||
def test_settings_is_configured_true():
|
||||
from unraid_mcp.config import settings
|
||||
|
||||
with (
|
||||
patch.object(settings, "UNRAID_API_URL", "https://example.com"),
|
||||
patch.object(settings, "UNRAID_API_KEY", "key123"),
|
||||
):
|
||||
assert settings.is_configured() is True
|
||||
|
||||
|
||||
def test_settings_is_configured_false_when_missing():
|
||||
from unraid_mcp.config import settings
|
||||
|
||||
with (
|
||||
patch.object(settings, "UNRAID_API_URL", None),
|
||||
patch.object(settings, "UNRAID_API_KEY", None),
|
||||
):
|
||||
assert settings.is_configured() is False
|
||||
|
||||
|
||||
def test_settings_apply_runtime_config_updates_module_globals():
|
||||
import os
|
||||
|
||||
from unraid_mcp.config import settings
|
||||
|
||||
original_url = settings.UNRAID_API_URL
|
||||
original_key = settings.UNRAID_API_KEY
|
||||
original_env_url = os.environ.get("UNRAID_API_URL")
|
||||
original_env_key = os.environ.get("UNRAID_API_KEY")
|
||||
try:
|
||||
settings.apply_runtime_config("https://newurl.com/graphql", "newkey")
|
||||
assert settings.UNRAID_API_URL == "https://newurl.com/graphql"
|
||||
assert settings.UNRAID_API_KEY == "newkey"
|
||||
assert os.environ["UNRAID_API_URL"] == "https://newurl.com/graphql"
|
||||
assert os.environ["UNRAID_API_KEY"] == "newkey"
|
||||
finally:
|
||||
# Reset module globals
|
||||
settings.UNRAID_API_URL = original_url
|
||||
settings.UNRAID_API_KEY = original_key
|
||||
# Reset os.environ
|
||||
if original_env_url is None:
|
||||
os.environ.pop("UNRAID_API_URL", None)
|
||||
else:
|
||||
os.environ["UNRAID_API_URL"] = original_env_url
|
||||
if original_env_key is None:
|
||||
os.environ.pop("UNRAID_API_KEY", None)
|
||||
else:
|
||||
os.environ["UNRAID_API_KEY"] = original_env_key
|
||||
|
||||
|
||||
def test_run_server_does_not_exit_when_creds_missing(monkeypatch):
|
||||
"""Server should not sys.exit(1) when credentials are absent."""
|
||||
import unraid_mcp.config.settings as settings_mod
|
||||
|
||||
monkeypatch.setattr(settings_mod, "UNRAID_API_URL", None)
|
||||
monkeypatch.setattr(settings_mod, "UNRAID_API_KEY", None)
|
||||
|
||||
from unraid_mcp import server as server_mod
|
||||
|
||||
with (
|
||||
patch.object(server_mod, "mcp") as mock_mcp,
|
||||
patch("unraid_mcp.server.logger") as mock_logger,
|
||||
):
|
||||
mock_mcp.run.side_effect = SystemExit(0)
|
||||
try:
|
||||
server_mod.run_server()
|
||||
except SystemExit as e:
|
||||
assert e.code == 0, f"Unexpected sys.exit({e.code}) — server crashed on missing creds"
|
||||
mock_logger.warning.assert_called()
|
||||
warning_msgs = [call[0][0] for call in mock_logger.warning.call_args_list]
|
||||
assert any("elicitation" in msg for msg in warning_msgs), (
|
||||
f"Expected a warning containing 'elicitation', got: {warning_msgs}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_and_configure_writes_env_file(tmp_path):
|
||||
"""elicit_and_configure writes a .env file and calls apply_runtime_config."""
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
from unraid_mcp.core.setup import elicit_and_configure
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "accept"
|
||||
mock_result.data = MagicMock()
|
||||
mock_result.data.api_url = "https://myunraid.example.com/graphql"
|
||||
mock_result.data.api_key = "abc123secret"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
creds_dir = tmp_path / "creds"
|
||||
creds_file = creds_dir / ".env"
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_DIR", creds_dir),
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", creds_file),
|
||||
patch("unraid_mcp.core.setup.PROJECT_ROOT", tmp_path),
|
||||
patch("unraid_mcp.core.setup.apply_runtime_config") as mock_apply,
|
||||
):
|
||||
result = await elicit_and_configure(mock_ctx)
|
||||
|
||||
assert result is True
|
||||
assert creds_file.exists()
|
||||
content = creds_file.read_text()
|
||||
assert "UNRAID_API_URL=https://myunraid.example.com/graphql" in content
|
||||
assert "UNRAID_API_KEY=abc123secret" in content
|
||||
mock_apply.assert_called_once_with("https://myunraid.example.com/graphql", "abc123secret")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_and_configure_returns_false_on_decline():
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_and_configure
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "decline"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_and_configure(mock_ctx)
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_elicit_and_configure_returns_false_on_cancel():
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from unraid_mcp.core.setup import elicit_and_configure
|
||||
|
||||
mock_ctx = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.action = "cancel"
|
||||
mock_ctx.elicit = AsyncMock(return_value=mock_result)
|
||||
|
||||
result = await elicit_and_configure(mock_ctx)
|
||||
assert result is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_make_graphql_request_raises_sentinel_when_unconfigured():
|
||||
"""make_graphql_request raises CredentialsNotConfiguredError (not ToolError) when
|
||||
credentials are absent, so callers can trigger elicitation."""
|
||||
from unraid_mcp.config import settings as settings_mod
|
||||
from unraid_mcp.core.client import make_graphql_request
|
||||
from unraid_mcp.core.exceptions import CredentialsNotConfiguredError
|
||||
|
||||
original_url = settings_mod.UNRAID_API_URL
|
||||
original_key = settings_mod.UNRAID_API_KEY
|
||||
try:
|
||||
settings_mod.UNRAID_API_URL = None
|
||||
settings_mod.UNRAID_API_KEY = None
|
||||
with pytest.raises(CredentialsNotConfiguredError):
|
||||
await make_graphql_request("{ __typename }")
|
||||
finally:
|
||||
settings_mod.UNRAID_API_URL = original_url
|
||||
settings_mod.UNRAID_API_KEY = original_key
|
||||
|
||||
|
||||
import os # noqa: E402 — needed for reload-based tests below
|
||||
|
||||
|
||||
def test_credentials_dir_defaults_to_home_unraid_mcp():
|
||||
"""CREDENTIALS_DIR defaults to ~/.unraid-mcp when env var is not set."""
|
||||
import importlib
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
os.environ.pop("UNRAID_CREDENTIALS_DIR", None)
|
||||
try:
|
||||
with patch.dict(os.environ, {}, clear=False):
|
||||
os.environ.pop("UNRAID_CREDENTIALS_DIR", None)
|
||||
importlib.reload(s)
|
||||
assert Path.home() / ".unraid-mcp" == s.CREDENTIALS_DIR
|
||||
finally:
|
||||
importlib.reload(s) # Restore module state
|
||||
|
||||
|
||||
def test_credentials_dir_env_var_override():
|
||||
"""UNRAID_CREDENTIALS_DIR env var overrides the default."""
|
||||
import importlib
|
||||
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
custom = "/tmp/custom-creds"
|
||||
try:
|
||||
with patch.dict(os.environ, {"UNRAID_CREDENTIALS_DIR": custom}):
|
||||
importlib.reload(s)
|
||||
assert Path(custom) == s.CREDENTIALS_DIR
|
||||
finally:
|
||||
# Reload without the custom env var to restore original state
|
||||
os.environ.pop("UNRAID_CREDENTIALS_DIR", None)
|
||||
importlib.reload(s)
|
||||
|
||||
|
||||
def test_credentials_env_path_is_dot_env_inside_credentials_dir():
|
||||
import unraid_mcp.config.settings as s
|
||||
|
||||
assert s.CREDENTIALS_ENV_PATH == s.CREDENTIALS_DIR / ".env"
|
||||
|
||||
|
||||
import stat # noqa: E402
|
||||
|
||||
|
||||
def test_write_env_creates_credentials_dir_with_700_permissions(tmp_path):
|
||||
"""_write_env creates CREDENTIALS_DIR with mode 700 (owner-only)."""
|
||||
from unraid_mcp.core.setup import _write_env
|
||||
|
||||
creds_dir = tmp_path / "creds"
|
||||
creds_file = creds_dir / ".env"
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_DIR", creds_dir),
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", creds_file),
|
||||
):
|
||||
_write_env("https://example.com", "mykey")
|
||||
|
||||
assert creds_dir.exists()
|
||||
dir_mode = stat.S_IMODE(creds_dir.stat().st_mode)
|
||||
assert dir_mode == 0o700, f"Expected 0o700, got {oct(dir_mode)}"
|
||||
|
||||
|
||||
def test_write_env_sets_file_permissions_600(tmp_path):
|
||||
"""_write_env sets .env file permissions to 600 (owner read/write only)."""
|
||||
from unraid_mcp.core.setup import _write_env
|
||||
|
||||
creds_dir = tmp_path / "creds"
|
||||
creds_file = creds_dir / ".env"
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_DIR", creds_dir),
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", creds_file),
|
||||
):
|
||||
_write_env("https://example.com", "mykey")
|
||||
|
||||
file_mode = stat.S_IMODE(creds_file.stat().st_mode)
|
||||
assert file_mode == 0o600, f"Expected 0o600, got {oct(file_mode)}"
|
||||
|
||||
|
||||
def test_write_env_seeds_from_env_example_on_first_run(tmp_path):
|
||||
"""_write_env copies .env.example structure and replaces credentials in-place."""
|
||||
from unraid_mcp.core.setup import _write_env
|
||||
|
||||
creds_dir = tmp_path / "creds"
|
||||
creds_file = creds_dir / ".env"
|
||||
# Create a fake .env.example
|
||||
example = tmp_path / ".env.example"
|
||||
example.write_text(
|
||||
"# Example config\nFOO=bar\nUNRAID_API_URL=placeholder\nUNRAID_API_KEY=placeholder\n"
|
||||
)
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_DIR", creds_dir),
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", creds_file),
|
||||
patch("unraid_mcp.core.setup.PROJECT_ROOT", tmp_path),
|
||||
):
|
||||
_write_env("https://real.url", "realkey")
|
||||
|
||||
content = creds_file.read_text()
|
||||
assert "UNRAID_API_URL=https://real.url" in content
|
||||
assert "UNRAID_API_KEY=realkey" in content
|
||||
assert "# Example config" in content # comment preserved
|
||||
assert "FOO=bar" in content # other vars preserved
|
||||
assert "placeholder" not in content # old credential values replaced
|
||||
# Credentials should be at their original position (after comments), not prepended before them
|
||||
lines = content.splitlines()
|
||||
url_idx = next(i for i, line in enumerate(lines) if line.startswith("UNRAID_API_URL="))
|
||||
comment_idx = next(i for i, line in enumerate(lines) if line.startswith("# Example config"))
|
||||
assert comment_idx < url_idx # Comment comes before credentials
|
||||
|
||||
|
||||
def test_write_env_first_run_no_example_file(tmp_path):
|
||||
"""_write_env works on first run when .env.example does not exist."""
|
||||
from unraid_mcp.core.setup import _write_env
|
||||
|
||||
creds_dir = tmp_path / "creds"
|
||||
creds_file = creds_dir / ".env"
|
||||
# tmp_path has no .env.example
|
||||
|
||||
with (
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_DIR", creds_dir),
|
||||
patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", creds_file),
|
||||
patch("unraid_mcp.core.setup.PROJECT_ROOT", tmp_path),
|
||||
):
|
||||
_write_env("https://myserver.com", "mykey123")
|
||||
|
||||
assert creds_file.exists()
|
||||
content = creds_file.read_text()
|
||||
assert "UNRAID_API_URL=https://myserver.com" in content
|
||||
assert "UNRAID_API_KEY=mykey123" in content
|
||||
|
||||
|
||||
def test_write_env_updates_existing_credentials_in_place(tmp_path):
    """_write_env updates credentials without destroying other vars."""
    from unraid_mcp.core.setup import _write_env

    env_dir = tmp_path / "creds"
    env_dir.mkdir(mode=0o700)
    env_file = env_dir / ".env"
    env_file.write_text(
        "UNRAID_API_URL=https://old.url\nUNRAID_API_KEY=oldkey\nUNRAID_VERIFY_SSL=false\n"
    )

    with (
        patch("unraid_mcp.core.setup.CREDENTIALS_DIR", env_dir),
        patch("unraid_mcp.core.setup.CREDENTIALS_ENV_PATH", env_file),
        patch("unraid_mcp.core.setup.PROJECT_ROOT", tmp_path),
    ):
        _write_env("https://new.url", "newkey")

    updated = env_file.read_text()
    # Credentials are replaced in place ...
    assert "UNRAID_API_URL=https://new.url" in updated
    assert "UNRAID_API_KEY=newkey" in updated
    # ... unrelated settings survive ...
    assert "UNRAID_VERIFY_SSL=false" in updated
    # ... and no trace of the previous values remains.
    assert "old" not in updated
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_and_configure_returns_false_when_client_not_supported():
    """elicit_and_configure returns False when client raises NotImplementedError."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_and_configure

    # A client that cannot elicit raises NotImplementedError from ctx.elicit.
    ctx = MagicMock()
    ctx.elicit = AsyncMock(side_effect=NotImplementedError("elicitation not supported"))

    assert await elicit_and_configure(ctx) is False
|
||||
|
||||
|
||||
def test_tool_error_handler_converts_credentials_not_configured_to_tool_error():
    """tool_error_handler wraps CredentialsNotConfiguredError in a ToolError."""
    import logging

    from unraid_mcp.core.exceptions import (
        CredentialsNotConfiguredError,
        ToolError,
        tool_error_handler,
    )

    logger = logging.getLogger("test")
    # The handler context manager must re-raise the domain error as ToolError.
    with pytest.raises(ToolError), tool_error_handler("docker", "list", logger):
        raise CredentialsNotConfiguredError()
|
||||
|
||||
|
||||
def test_tool_error_handler_credentials_error_message_includes_path():
    """ToolError from CredentialsNotConfiguredError includes the credentials path."""
    import logging

    from unraid_mcp.config.settings import CREDENTIALS_ENV_PATH
    from unraid_mcp.core.exceptions import (
        CredentialsNotConfiguredError,
        ToolError,
        tool_error_handler,
    )

    logger = logging.getLogger("test")
    with pytest.raises(ToolError) as exc_info, tool_error_handler("docker", "list", logger):
        raise CredentialsNotConfiguredError()

    message = str(exc_info.value)
    # The user must be pointed at the credentials file and at the setup flow.
    assert str(CREDENTIALS_ENV_PATH) in message
    assert "setup" in message.lower()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# elicit_reset_confirmation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_false_when_ctx_none():
    """Returns False immediately when no MCP context is available."""
    from unraid_mcp.core.setup import elicit_reset_confirmation

    assert await elicit_reset_confirmation(None, "https://example.com") is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_true_when_user_confirms():
    """Returns True when the user accepts and answers True."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    # Simulate the user accepting the prompt and answering "yes".
    answer = MagicMock()
    answer.action = "accept"
    answer.data = True
    ctx = MagicMock()
    ctx.elicit = AsyncMock(return_value=answer)

    assert await elicit_reset_confirmation(ctx, "https://example.com") is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_false_when_user_answers_false():
    """Returns False when the user accepts but answers False (does not want to reset)."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    # The prompt is accepted, but the answer itself is "no".
    answer = MagicMock()
    answer.action = "accept"
    answer.data = False
    ctx = MagicMock()
    ctx.elicit = AsyncMock(return_value=answer)

    assert await elicit_reset_confirmation(ctx, "https://example.com") is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_false_when_declined():
    """Returns False when the user declines via action (dismisses the prompt)."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    declined = MagicMock()
    declined.action = "decline"
    ctx = MagicMock()
    ctx.elicit = AsyncMock(return_value=declined)

    assert await elicit_reset_confirmation(ctx, "https://example.com") is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_false_when_cancelled():
    """Returns False when the user cancels the prompt."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    cancelled = MagicMock()
    cancelled.action = "cancel"
    ctx = MagicMock()
    ctx.elicit = AsyncMock(return_value=cancelled)

    assert await elicit_reset_confirmation(ctx, "https://example.com") is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_returns_true_when_not_implemented():
    """Returns True (proceed with reset) when the MCP client does not support elicitation.

    Non-interactive clients (stdio, CI) must not be permanently blocked from
    reconfiguring credentials just because they can't ask the user a yes/no question.
    """
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    ctx = MagicMock()
    ctx.elicit = AsyncMock(side_effect=NotImplementedError("elicitation not supported"))

    assert await elicit_reset_confirmation(ctx, "https://example.com") is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_elicit_reset_confirmation_includes_current_url_in_prompt():
    """The elicitation message includes the current URL so the user knows what they're replacing."""
    from unittest.mock import AsyncMock, MagicMock

    from unraid_mcp.core.setup import elicit_reset_confirmation

    declined = MagicMock()
    declined.action = "decline"
    ctx = MagicMock()
    ctx.elicit = AsyncMock(return_value=declined)

    url = "https://my-unraid.example.com:31337"
    await elicit_reset_confirmation(ctx, url)

    # The message may be passed by keyword or as the first positional argument.
    call = ctx.elicit.call_args
    prompt = call.kwargs.get("message") or call.args[0]
    assert url in prompt
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_credentials_not_configured_surfaces_as_tool_error_with_path():
    """CredentialsNotConfiguredError from a tool becomes ToolError with the credentials path."""
    from unittest.mock import AsyncMock, patch

    from tests.conftest import make_tool_fn
    from unraid_mcp.config.settings import CREDENTIALS_ENV_PATH
    from unraid_mcp.core.exceptions import CredentialsNotConfiguredError, ToolError

    tool_fn = make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")

    # Every GraphQL call fails as if credentials were never set up.
    failing_request = AsyncMock(side_effect=CredentialsNotConfiguredError())
    with (
        patch("unraid_mcp.tools.unraid.make_graphql_request", new=failing_request),
        pytest.raises(ToolError) as exc_info,
    ):
        await tool_fn(action="user", subaction="me")

    assert str(CREDENTIALS_ENV_PATH) in str(exc_info.value)
|
||||
125
tests/test_snapshot.py
Normal file
125
tests/test_snapshot.py
Normal file
@@ -0,0 +1,125 @@
|
||||
# tests/test_snapshot.py
|
||||
"""Tests for subscribe_once() and subscribe_collect() snapshot helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def _make_ws_message(sub_id: str, data: dict, proto: str = "graphql-transport-ws") -> str:
|
||||
msg_type = "next" if proto == "graphql-transport-ws" else "data"
|
||||
return json.dumps({"id": sub_id, "type": msg_type, "payload": {"data": data}})
|
||||
|
||||
|
||||
def _make_ws_recv_sequence(*messages: str):
|
||||
"""Build an async iterator that yields strings then hangs."""
|
||||
|
||||
async def _gen():
|
||||
for m in messages:
|
||||
yield m
|
||||
# hang — simulates no more messages
|
||||
await asyncio.Event().wait()
|
||||
|
||||
return _gen()
|
||||
|
||||
|
||||
@pytest.fixture
def mock_ws():
    """A mock websocket speaking the modern subprotocol, with an awaitable send()."""
    fake_socket = MagicMock()
    fake_socket.subprotocol = "graphql-transport-ws"
    fake_socket.send = AsyncMock()
    return fake_socket
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_subscribe_once_returns_first_event(mock_ws):
    """subscribe_once returns data from the first matching event."""
    from unraid_mcp.subscriptions.snapshot import subscribe_once

    ack = json.dumps({"type": "connection_ack"})
    data_msg = _make_ws_message("snapshot-1", {"systemMetricsCpu": {"percentTotal": 42.0}})

    # Renamed from `aiter` — that shadowed the Python 3.10+ builtin of the same name.
    async def _iter_items(items):
        for item in items:
            yield item

    mock_ws.__aiter__ = lambda s: _iter_items([data_msg])
    mock_ws.recv = AsyncMock(return_value=ack)

    with patch("unraid_mcp.subscriptions.snapshot.websockets.connect") as mock_connect:
        mock_connect.return_value.__aenter__ = AsyncMock(return_value=mock_ws)
        mock_connect.return_value.__aexit__ = AsyncMock(return_value=False)

        result = await subscribe_once("subscription { systemMetricsCpu { percentTotal } }")

    assert result == {"systemMetricsCpu": {"percentTotal": 42.0}}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_subscribe_once_raises_on_graphql_error(mock_ws):
    """subscribe_once raises ToolError when server returns GraphQL errors."""
    from unraid_mcp.core.exceptions import ToolError
    from unraid_mcp.subscriptions.snapshot import subscribe_once

    ack = json.dumps({"type": "connection_ack"})
    error_msg = json.dumps(
        {
            "id": "snapshot-1",
            "type": "next",
            "payload": {"errors": [{"message": "Not authorized"}]},
        }
    )

    # Renamed from `aiter` — that shadowed the Python 3.10+ builtin of the same name.
    async def _iter_items(items):
        for item in items:
            yield item

    mock_ws.__aiter__ = lambda s: _iter_items([error_msg])
    mock_ws.recv = AsyncMock(return_value=ack)

    with patch("unraid_mcp.subscriptions.snapshot.websockets.connect") as mock_connect:
        mock_connect.return_value.__aenter__ = AsyncMock(return_value=mock_ws)
        mock_connect.return_value.__aexit__ = AsyncMock(return_value=False)

        with pytest.raises(ToolError, match="Not authorized"):
            await subscribe_once("subscription { systemMetricsCpu { percentTotal } }")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_subscribe_collect_returns_multiple_events(mock_ws):
    """subscribe_collect returns a list of events received within the window."""
    from unraid_mcp.subscriptions.snapshot import subscribe_collect

    ack = json.dumps({"type": "connection_ack"})
    msg1 = _make_ws_message("snapshot-1", {"notificationAdded": {"id": "1", "title": "A"}})
    msg2 = _make_ws_message("snapshot-1", {"notificationAdded": {"id": "2", "title": "B"}})

    # Renamed from `aiter` — that shadowed the Python 3.10+ builtin of the same name.
    async def _iter_items(items):
        for item in items:
            yield item
        await asyncio.sleep(10)  # hang after messages

    mock_ws.__aiter__ = lambda s: _iter_items([msg1, msg2])
    mock_ws.recv = AsyncMock(return_value=ack)

    with patch("unraid_mcp.subscriptions.snapshot.websockets.connect") as mock_connect:
        mock_connect.return_value.__aenter__ = AsyncMock(return_value=mock_ws)
        mock_connect.return_value.__aexit__ = AsyncMock(return_value=False)

        result = await subscribe_collect(
            "subscription { notificationAdded { id title } }",
            collect_for=0.1,
        )

    assert len(result) == 2
    assert result[0]["notificationAdded"]["id"] == "1"
|
||||
|
||||
|
||||
def test_snapshot_actions_importable_from_subscriptions() -> None:
    """Snapshot/collect action registries are importable from subscriptions.queries."""
    from unraid_mcp.subscriptions.queries import COLLECT_ACTIONS, SNAPSHOT_ACTIONS

    # Spot-check one representative entry from each registry.
    assert "cpu" in SNAPSHOT_ACTIONS
    assert "log_tail" in COLLECT_ACTIONS
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_storage tool."""
|
||||
"""Tests for disk subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -7,7 +7,7 @@ import pytest
|
||||
from conftest import make_tool_fn
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.tools.storage import format_bytes
|
||||
from unraid_mcp.core.utils import format_bytes, format_kb, safe_get
|
||||
|
||||
|
||||
# --- Unit tests for helpers ---
|
||||
@@ -38,45 +38,130 @@ class TestFormatBytes:
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestStorageValidation:
|
||||
async def test_disk_details_requires_disk_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="disk_id"):
|
||||
await tool_fn(action="disk_details")
|
||||
await tool_fn(action="disk", subaction="disk_details")
|
||||
|
||||
async def test_logs_requires_log_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="log_path"):
|
||||
await tool_fn(action="logs")
|
||||
await tool_fn(action="disk", subaction="logs")
|
||||
|
||||
async def test_logs_rejects_invalid_path(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/etc/shadow")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/etc/shadow")
|
||||
|
||||
async def test_logs_rejects_path_traversal(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
# Traversal that escapes /var/log/ to reach /etc/shadow
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/var/log/../../etc/shadow")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/../../etc/shadow")
|
||||
# Traversal that escapes /mnt/ to reach /etc/passwd
|
||||
with pytest.raises(ToolError, match="log_path must start with"):
|
||||
await tool_fn(action="logs", log_path="/mnt/../etc/passwd")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/mnt/../etc/passwd")
|
||||
|
||||
async def test_logs_allows_valid_paths(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result["content"] == "ok"
|
||||
|
||||
async def test_logs_tail_lines_too_large(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="tail_lines must be between"):
|
||||
await tool_fn(
|
||||
action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=10_001
|
||||
)
|
||||
|
||||
async def test_logs_tail_lines_zero_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="tail_lines must be between"):
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=0)
|
||||
|
||||
async def test_logs_tail_lines_at_max_accepted(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(
|
||||
action="disk", subaction="logs", log_path="/var/log/syslog", tail_lines=10_000
|
||||
)
|
||||
assert result["content"] == "ok"
|
||||
|
||||
async def test_non_logs_action_ignores_tail_lines_validation(
|
||||
self, _mock_graphql: AsyncMock
|
||||
) -> None:
|
||||
_mock_graphql.return_value = {"shares": []}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk", subaction="shares", tail_lines=0)
|
||||
assert result["shares"] == []
|
||||
|
||||
|
||||
class TestFormatKb:
    """format_kb renders a kilobyte count as '<value> <unit>' with two decimals.

    None or non-numeric input yields "N/A"; units step KB -> MB -> GB -> TB
    at factors of 1024 (as pinned by the boundary test below).
    """

    def test_none_returns_na(self) -> None:
        assert format_kb(None) == "N/A"

    def test_invalid_string_returns_na(self) -> None:
        assert format_kb("not-a-number") == "N/A"

    def test_kilobytes_range(self) -> None:
        assert format_kb(512) == "512.00 KB"

    def test_megabytes_range(self) -> None:
        assert format_kb(2048) == "2.00 MB"

    def test_gigabytes_range(self) -> None:
        assert format_kb(1_048_576) == "1.00 GB"

    def test_terabytes_range(self) -> None:
        assert format_kb(1_073_741_824) == "1.00 TB"

    def test_boundary_exactly_1024_kb(self) -> None:
        # 1024 KB = 1 MB
        assert format_kb(1024) == "1.00 MB"
|
||||
|
||||
|
||||
class TestSafeGet:
    """safe_get walks nested dicts key-by-key, returning `default` when a step
    is missing or not a dict; falsy stored values (0, False, "") are returned
    as-is, never replaced by the default.
    """

    def test_simple_key_access(self) -> None:
        assert safe_get({"a": 1}, "a") == 1

    def test_nested_key_access(self) -> None:
        assert safe_get({"a": {"b": "val"}}, "a", "b") == "val"

    def test_missing_key_returns_none(self) -> None:
        assert safe_get({"a": 1}, "missing") is None

    def test_none_intermediate_returns_default(self) -> None:
        assert safe_get({"a": None}, "a", "b") is None

    def test_custom_default_returned(self) -> None:
        assert safe_get({}, "x", default="fallback") == "fallback"

    def test_non_dict_intermediate_returns_default(self) -> None:
        # "string" cannot be descended into, so the default (None) comes back.
        assert safe_get({"a": "string"}, "a", "b") is None

    def test_empty_list_default(self) -> None:
        result = safe_get({}, "missing", default=[])
        assert result == []

    def test_zero_value_not_replaced_by_default(self) -> None:
        assert safe_get({"temp": 0}, "temp", default="N/A") == 0

    def test_false_value_not_replaced_by_default(self) -> None:
        assert safe_get({"active": False}, "active", default=True) is False

    def test_empty_string_not_replaced_by_default(self) -> None:
        assert safe_get({"name": ""}, "name", default="unknown") == ""
|
||||
|
||||
|
||||
class TestStorageActions:
|
||||
async def test_shares(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -84,13 +169,13 @@ class TestStorageActions:
|
||||
"shares": [{"id": "s:1", "name": "media"}, {"id": "s:2", "name": "backups"}]
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="shares")
|
||||
result = await tool_fn(action="disk", subaction="shares")
|
||||
assert len(result["shares"]) == 2
|
||||
|
||||
async def test_disks(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"disks": [{"id": "d:1", "device": "sda"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disks")
|
||||
result = await tool_fn(action="disk", subaction="disks")
|
||||
assert len(result["disks"]) == 1
|
||||
|
||||
async def test_disk_details(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -105,7 +190,7 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "35\u00b0C"
|
||||
assert "1.00 GB" in result["summary"]["size_formatted"]
|
||||
|
||||
@@ -122,7 +207,7 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "0\u00b0C"
|
||||
|
||||
async def test_disk_details_temperature_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -138,32 +223,26 @@ class TestStorageActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="disk_details", disk_id="d:1")
|
||||
result = await tool_fn(action="disk", subaction="disk_details", disk_id="d:1")
|
||||
assert result["summary"]["temperature"] == "N/A"
|
||||
|
||||
async def test_logs_null_log_file(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""logFile being null should return an empty dict."""
|
||||
_mock_graphql.return_value = {"logFile": None}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result == {}
|
||||
|
||||
async def test_disk_details_not_found(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"disk": None}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="not found"):
|
||||
await tool_fn(action="disk_details", disk_id="d:missing")
|
||||
|
||||
async def test_unassigned(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"unassignedDevices": []}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="unassigned")
|
||||
assert result["devices"] == []
|
||||
await tool_fn(action="disk", subaction="disk_details", disk_id="d:missing")
|
||||
|
||||
async def test_log_files(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"logFiles": [{"name": "syslog", "path": "/var/log/syslog"}]}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="log_files")
|
||||
result = await tool_fn(action="disk", subaction="log_files")
|
||||
assert len(result["log_files"]) == 1
|
||||
|
||||
async def test_logs(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -171,7 +250,7 @@ class TestStorageActions:
|
||||
"logFile": {"path": "/var/log/syslog", "content": "log line", "totalLines": 1}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
result = await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
assert result["content"] == "log line"
|
||||
|
||||
|
||||
@@ -185,7 +264,7 @@ class TestStorageNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid JSON"):
|
||||
await tool_fn(action="logs", log_path="/var/log/syslog")
|
||||
await tool_fn(action="disk", subaction="logs", log_path="/var/log/syslog")
|
||||
|
||||
async def test_shares_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Connection refused when listing shares should propagate as ToolError."""
|
||||
@@ -194,11 +273,73 @@ class TestStorageNetworkErrors:
|
||||
)
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Connection refused"):
|
||||
await tool_fn(action="shares")
|
||||
await tool_fn(action="disk", subaction="shares")
|
||||
|
||||
async def test_disks_http_500(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""HTTP 500 when listing disks should propagate as ToolError."""
|
||||
_mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="HTTP error 500"):
|
||||
await tool_fn(action="disks")
|
||||
await tool_fn(action="disk", subaction="disks")
|
||||
|
||||
|
||||
class TestStorageFlashBackup:
    """Validation and happy-path behavior of the disk/flash_backup subaction."""

    async def test_flash_backup_requires_confirm(self, _mock_graphql: AsyncMock) -> None:
        tool = _make_tool()
        # Everything supplied except confirm=True — must be rejected.
        with pytest.raises(ToolError, match="not confirmed"):
            await tool(
                action="disk",
                subaction="flash_backup",
                remote_name="r",
                source_path="/boot",
                destination_path="r:b",
            )

    async def test_flash_backup_requires_remote_name(self, _mock_graphql: AsyncMock) -> None:
        tool = _make_tool()
        with pytest.raises(ToolError, match="remote_name"):
            await tool(action="disk", subaction="flash_backup", confirm=True)

    async def test_flash_backup_requires_source_path(self, _mock_graphql: AsyncMock) -> None:
        tool = _make_tool()
        with pytest.raises(ToolError, match="source_path"):
            await tool(action="disk", subaction="flash_backup", confirm=True, remote_name="r")

    async def test_flash_backup_requires_destination_path(self, _mock_graphql: AsyncMock) -> None:
        tool = _make_tool()
        with pytest.raises(ToolError, match="destination_path"):
            await tool(
                action="disk",
                subaction="flash_backup",
                confirm=True,
                remote_name="r",
                source_path="/boot",
            )

    async def test_flash_backup_success(self, _mock_graphql: AsyncMock) -> None:
        _mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:1"}}
        tool = _make_tool()
        outcome = await tool(
            action="disk",
            subaction="flash_backup",
            confirm=True,
            remote_name="r",
            source_path="/boot",
            destination_path="r:b",
        )
        assert outcome["success"] is True
        assert outcome["data"]["status"] == "started"

    async def test_flash_backup_passes_options(self, _mock_graphql: AsyncMock) -> None:
        _mock_graphql.return_value = {"initiateFlashBackup": {"status": "started", "jobId": "j:2"}}
        tool = _make_tool()
        await tool(
            action="disk",
            subaction="flash_backup",
            confirm=True,
            remote_name="r",
            source_path="/boot",
            destination_path="r:b",
            backup_options={"dryRun": True},
        )
        # The options dict must be forwarded verbatim in the GraphQL input.
        assert _mock_graphql.call_args[0][1]["input"]["options"] == {"dryRun": True}
|
||||
|
||||
156
tests/test_subscription_manager.py
Normal file
156
tests/test_subscription_manager.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""Tests for _cap_log_content in subscriptions/manager.py.
|
||||
|
||||
_cap_log_content is a pure utility that prevents unbounded memory growth from
|
||||
log subscription data. It must: return a NEW dict (not mutate), recursively
|
||||
cap nested 'content' fields, and only truncate when both byte limit and line
|
||||
limit are exceeded.
|
||||
"""
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
from unraid_mcp.subscriptions.manager import _cap_log_content
|
||||
|
||||
|
||||
class TestCapLogContentImmutability:
    """The function must return a new dict — never mutate the input."""

    def test_returns_new_dict(self) -> None:
        payload = {"key": "value"}
        assert _cap_log_content(payload) is not payload

    def test_input_not_mutated_on_passthrough(self) -> None:
        payload = {"content": "short text", "other": "value"}
        before = payload["content"]
        _cap_log_content(payload)
        assert payload["content"] == before

    def test_input_not_mutated_on_truncation(self) -> None:
        # Shrink the limits so the truncation branch actually runs.
        big = "\n".join(f"line {i}" for i in range(200))
        payload = {"content": big}
        with (
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
        ):
            _cap_log_content(payload)
        # The caller's dict must be left untouched.
        assert payload["content"] == big
|
||||
|
||||
|
||||
class TestCapLogContentSmallData:
    """Content below the byte limit must be returned unchanged."""

    def test_small_content_unchanged(self) -> None:
        payload = {"content": "just a few lines\nof log data\n"}
        capped = _cap_log_content(payload)
        assert capped["content"] == payload["content"]

    def test_non_content_keys_passed_through(self) -> None:
        payload = {"name": "cpu_subscription", "timestamp": "2026-02-18T00:00:00Z"}
        assert _cap_log_content(payload) == payload

    def test_integer_value_passed_through(self) -> None:
        payload = {"count": 42, "active": True}
        assert _cap_log_content(payload) == payload
|
||||
|
||||
|
||||
class TestCapLogContentTruncation:
    """Content exceeding both byte AND line limits must be truncated to the last N lines."""

    def test_oversized_content_truncated_and_byte_capped(self) -> None:
        # 200 lines against a tiny byte limit: recent content must survive within the cap.
        payload = {"content": "\n".join(f"line {i}" for i in range(200))}
        with (
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
        ):
            capped = _cap_log_content(payload)
        kept = capped["content"].splitlines()
        assert len(capped["content"].encode("utf-8", errors="replace")) <= 10
        # The newest line must be what survives at the tail.
        assert kept[-1] == "line 199"

    def test_content_with_fewer_lines_than_limit_still_honors_byte_cap(self) -> None:
        """If byte limit is exceeded, output must still be capped even with few lines."""
        # 30 lines, byte limit 10, line limit 50 -> bytes win regardless of line count.
        payload = {"content": "\n".join(f"line {i}" for i in range(30))}
        with (
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
        ):
            capped = _cap_log_content(payload)
        assert len(capped["content"].encode("utf-8", errors="replace")) <= 10

    def test_non_content_keys_preserved_alongside_truncated_content(self) -> None:
        payload = {
            "content": "\n".join(f"line {i}" for i in range(200)),
            "path": "/var/log/syslog",
            "total_lines": 200,
        }
        with (
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
        ):
            capped = _cap_log_content(payload)
        assert capped["path"] == "/var/log/syslog"
        assert capped["total_lines"] == 200
        assert len(capped["content"].encode("utf-8", errors="replace")) <= 10
|
||||
|
||||
|
||||
class TestCapLogContentNested:
|
||||
"""Nested 'content' fields inside sub-dicts must also be capped recursively."""
|
||||
|
||||
def test_nested_content_field_capped(self) -> None:
|
||||
lines = [f"line {i}" for i in range(200)]
|
||||
data = {"logFile": {"content": "\n".join(lines), "path": "/var/log/syslog"}}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
|
||||
):
|
||||
result = _cap_log_content(data)
|
||||
assert len(result["logFile"]["content"].encode("utf-8", errors="replace")) <= 10
|
||||
assert result["logFile"]["path"] == "/var/log/syslog"
|
||||
|
||||
def test_deeply_nested_content_capped(self) -> None:
|
||||
lines = [f"line {i}" for i in range(200)]
|
||||
data = {"outer": {"inner": {"content": "\n".join(lines)}}}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
|
||||
):
|
||||
result = _cap_log_content(data)
|
||||
assert len(result["outer"]["inner"]["content"].encode("utf-8", errors="replace")) <= 10
|
||||
|
||||
def test_nested_non_content_keys_unaffected(self) -> None:
|
||||
data = {"metrics": {"cpu": 42.5, "memory": 8192}}
|
||||
result = _cap_log_content(data)
|
||||
assert result == data
|
||||
|
||||
|
||||
class TestCapLogContentSingleMassiveLine:
|
||||
"""A single line larger than the byte cap must be hard-capped at byte level."""
|
||||
|
||||
def test_single_massive_line_hard_caps_bytes(self) -> None:
|
||||
# One line, no newlines, larger than the byte cap.
|
||||
# The while-loop can't reduce it (len(lines) == 1), so the
|
||||
# last-resort byte-slice path at manager.py:65-69 must fire.
|
||||
huge_content = "x" * 200
|
||||
data = {"content": huge_content}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000),
|
||||
):
|
||||
result = _cap_log_content(data)
|
||||
assert len(result["content"].encode("utf-8", errors="replace")) <= 10
|
||||
|
||||
def test_single_massive_line_input_not_mutated(self) -> None:
|
||||
huge_content = "x" * 200
|
||||
data = {"content": huge_content}
|
||||
with (
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
|
||||
patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000),
|
||||
):
|
||||
_cap_log_content(data)
|
||||
assert data["content"] == huge_content
|
||||
131
tests/test_subscription_validation.py
Normal file
131
tests/test_subscription_validation.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""Tests for _validate_subscription_query in diagnostics.py.
|
||||
|
||||
Security-critical: this function is the only guard against arbitrary GraphQL
|
||||
operations (mutations, queries) being sent over the WebSocket subscription channel.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from unraid_mcp.core.exceptions import ToolError
|
||||
from unraid_mcp.subscriptions.diagnostics import (
|
||||
_ALLOWED_SUBSCRIPTION_FIELDS,
|
||||
_validate_subscription_query,
|
||||
)
|
||||
|
||||
|
||||
class TestValidateSubscriptionQueryAllowed:
|
||||
"""All whitelisted subscription names must be accepted."""
|
||||
|
||||
@pytest.mark.parametrize("sub_name", sorted(_ALLOWED_SUBSCRIPTION_FIELDS))
|
||||
def test_all_allowed_names_accepted(self, sub_name: str) -> None:
|
||||
query = f"subscription {{ {sub_name} {{ data }} }}"
|
||||
result = _validate_subscription_query(query)
|
||||
assert result == sub_name
|
||||
|
||||
def test_returns_extracted_subscription_name(self) -> None:
|
||||
query = "subscription { cpu { usage } }"
|
||||
assert _validate_subscription_query(query) == "cpu"
|
||||
|
||||
def test_leading_whitespace_accepted(self) -> None:
|
||||
query = " subscription { memory { free } }"
|
||||
assert _validate_subscription_query(query) == "memory"
|
||||
|
||||
def test_multiline_query_accepted(self) -> None:
|
||||
query = "subscription {\n logFile {\n content\n }\n}"
|
||||
assert _validate_subscription_query(query) == "logFile"
|
||||
|
||||
def test_case_insensitive_subscription_keyword(self) -> None:
|
||||
"""'SUBSCRIPTION' should be accepted (regex uses IGNORECASE)."""
|
||||
query = "SUBSCRIPTION { cpu { usage } }"
|
||||
assert _validate_subscription_query(query) == "cpu"
|
||||
|
||||
|
||||
class TestValidateSubscriptionQueryForbiddenKeywords:
|
||||
"""Queries containing 'mutation' or 'query' as standalone keywords must be rejected."""
|
||||
|
||||
def test_mutation_keyword_rejected(self) -> None:
|
||||
query = 'mutation { docker { start(id: "abc") } }'
|
||||
with pytest.raises(ToolError, match="must be a subscription"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_query_keyword_rejected(self) -> None:
|
||||
query = "query { info { os { platform } } }"
|
||||
with pytest.raises(ToolError, match="must be a subscription"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_mutation_embedded_in_subscription_rejected(self) -> None:
|
||||
"""'mutation' anywhere in the string triggers rejection."""
|
||||
query = "subscription { cpuSubscription { mutation data } }"
|
||||
with pytest.raises(ToolError, match="must be a subscription"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_query_embedded_in_subscription_rejected(self) -> None:
|
||||
query = "subscription { cpuSubscription { query data } }"
|
||||
with pytest.raises(ToolError, match="must be a subscription"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_mutation_case_insensitive_rejection(self) -> None:
|
||||
query = 'MUTATION { docker { start(id: "abc") } }'
|
||||
with pytest.raises(ToolError, match="must be a subscription"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_mutation_field_identifier_not_rejected(self) -> None:
|
||||
"""'mutationField' as an identifier must NOT be rejected — only standalone 'mutation'."""
|
||||
# This tests the \b word boundary in _FORBIDDEN_KEYWORDS
|
||||
query = "subscription { cpu { mutationField } }"
|
||||
# Should not raise — "mutationField" is an identifier, not the keyword
|
||||
result = _validate_subscription_query(query)
|
||||
assert result == "cpu"
|
||||
|
||||
def test_query_field_identifier_not_rejected(self) -> None:
|
||||
"""'queryResult' as an identifier must NOT be rejected."""
|
||||
query = "subscription { cpu { queryResult } }"
|
||||
result = _validate_subscription_query(query)
|
||||
assert result == "cpu"
|
||||
|
||||
|
||||
class TestValidateSubscriptionQueryInvalidFormat:
|
||||
"""Queries that don't match the expected subscription format must be rejected."""
|
||||
|
||||
def test_empty_string_rejected(self) -> None:
|
||||
with pytest.raises(ToolError, match="must start with 'subscription'"):
|
||||
_validate_subscription_query("")
|
||||
|
||||
def test_plain_identifier_rejected(self) -> None:
|
||||
with pytest.raises(ToolError, match="must start with 'subscription'"):
|
||||
_validate_subscription_query("cpuSubscription { usage }")
|
||||
|
||||
def test_missing_operation_body_rejected(self) -> None:
|
||||
with pytest.raises(ToolError, match="must start with 'subscription'"):
|
||||
_validate_subscription_query("subscription")
|
||||
|
||||
def test_subscription_without_field_rejected(self) -> None:
|
||||
"""subscription { } with no field name doesn't match the pattern."""
|
||||
with pytest.raises(ToolError, match="must start with 'subscription'"):
|
||||
_validate_subscription_query("subscription { }")
|
||||
|
||||
|
||||
class TestValidateSubscriptionQueryUnknownName:
|
||||
"""Subscription names not in the whitelist must be rejected even if format is valid."""
|
||||
|
||||
def test_unknown_subscription_name_rejected(self) -> None:
|
||||
query = "subscription { unknownSubscription { data } }"
|
||||
with pytest.raises(ToolError, match="not allowed"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_error_message_includes_allowed_list(self) -> None:
|
||||
"""Error message must list the allowed subscription field names for usability."""
|
||||
query = "subscription { badSub { data } }"
|
||||
with pytest.raises(ToolError, match="Allowed fields"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_arbitrary_field_name_rejected(self) -> None:
|
||||
query = "subscription { users { id email } }"
|
||||
with pytest.raises(ToolError, match="not allowed"):
|
||||
_validate_subscription_query(query)
|
||||
|
||||
def test_close_but_not_whitelisted_rejected(self) -> None:
|
||||
"""'cpuSubscription' (old operation-style name) is not in the field allow-list."""
|
||||
query = "subscription { cpuSubscription { usage } }"
|
||||
with pytest.raises(ToolError, match="not allowed"):
|
||||
_validate_subscription_query(query)
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_users tool.
|
||||
"""Tests for user subactions of the consolidated unraid tool.
|
||||
|
||||
NOTE: Unraid GraphQL API only supports the me() query.
|
||||
User management operations (list, add, delete, cloud, remote_access, origins) are NOT available in the API.
|
||||
@@ -15,35 +15,35 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch("unraid_mcp.tools.users.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestUsersValidation:
|
||||
"""Test validation for invalid actions."""
|
||||
"""Test validation for invalid subactions."""
|
||||
|
||||
async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that non-existent actions are rejected with clear error."""
|
||||
async def test_invalid_subaction_rejected(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that non-existent subactions are rejected with clear error."""
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="list")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="list")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="add")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="add")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="delete")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="delete")
|
||||
|
||||
with pytest.raises(ToolError, match="Invalid action"):
|
||||
await tool_fn(action="cloud")
|
||||
with pytest.raises(ToolError, match="Invalid subaction"):
|
||||
await tool_fn(action="user", subaction="cloud")
|
||||
|
||||
|
||||
class TestUsersActions:
|
||||
"""Test the single supported action: me."""
|
||||
"""Test the single supported subaction: me."""
|
||||
|
||||
async def test_me(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test querying current authenticated user."""
|
||||
@@ -51,27 +51,18 @@ class TestUsersActions:
|
||||
"me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="me")
|
||||
result = await tool_fn(action="user", subaction="me")
|
||||
assert result["name"] == "root"
|
||||
assert result["roles"] == ["ADMIN"]
|
||||
_mock_graphql.assert_called_once()
|
||||
|
||||
async def test_me_default_action(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that 'me' is the default action."""
|
||||
_mock_graphql.return_value = {
|
||||
"me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn()
|
||||
assert result["name"] == "root"
|
||||
|
||||
|
||||
class TestUsersNoneHandling:
|
||||
"""Verify actions return empty dict (not TypeError) when API returns None."""
|
||||
"""Verify subactions return empty dict (not TypeError) when API returns None."""
|
||||
|
||||
async def test_me_returns_none(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Test that me returns empty dict when API returns None."""
|
||||
_mock_graphql.return_value = {"me": None}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="me")
|
||||
result = await tool_fn(action="user", subaction="me")
|
||||
assert result == {}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Tests for unraid_vm tool."""
|
||||
"""Tests for vm subactions of the consolidated unraid tool."""
|
||||
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import AsyncMock, patch
|
||||
@@ -11,34 +11,32 @@ from unraid_mcp.core.exceptions import ToolError
|
||||
|
||||
@pytest.fixture
|
||||
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
||||
with patch(
|
||||
"unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock
|
||||
) as mock:
|
||||
with patch("unraid_mcp.tools.unraid.make_graphql_request", new_callable=AsyncMock) as mock:
|
||||
yield mock
|
||||
|
||||
|
||||
def _make_tool():
|
||||
return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
|
||||
return make_tool_fn("unraid_mcp.tools.unraid", "register_unraid_tool", "unraid")
|
||||
|
||||
|
||||
class TestVmValidation:
|
||||
async def test_actions_except_list_require_vm_id(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in ("details", "start", "stop", "pause", "resume", "reboot"):
|
||||
for subaction in ("details", "start", "stop", "pause", "resume", "reboot"):
|
||||
with pytest.raises(ToolError, match="vm_id"):
|
||||
await tool_fn(action=action)
|
||||
await tool_fn(action="vm", subaction=subaction)
|
||||
|
||||
async def test_destructive_actions_require_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
tool_fn = _make_tool()
|
||||
for action in ("force_stop", "reset"):
|
||||
with pytest.raises(ToolError, match="destructive"):
|
||||
await tool_fn(action=action, vm_id="uuid-1")
|
||||
for subaction in ("force_stop", "reset"):
|
||||
with pytest.raises(ToolError, match="not confirmed"):
|
||||
await tool_fn(action="vm", subaction=subaction, vm_id="uuid-1")
|
||||
|
||||
async def test_destructive_vm_id_check_before_confirm(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Destructive actions without vm_id should fail on vm_id first (validated before confirm)."""
|
||||
"""Destructive subactions without vm_id should fail on vm_id first (validated before confirm)."""
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="vm_id"):
|
||||
await tool_fn(action="force_stop")
|
||||
await tool_fn(action="vm", subaction="force_stop")
|
||||
|
||||
|
||||
class TestVmActions:
|
||||
@@ -51,20 +49,20 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert len(result["vms"]) == 1
|
||||
assert result["vms"][0]["name"] == "Windows 11"
|
||||
|
||||
async def test_list_empty(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vms": {"domains": []}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert result["vms"] == []
|
||||
|
||||
async def test_list_no_vms_key(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="list")
|
||||
result = await tool_fn(action="vm", subaction="list")
|
||||
assert result["vms"] == []
|
||||
|
||||
async def test_details_by_uuid(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -74,7 +72,7 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="details", vm_id="uuid-1")
|
||||
assert result["name"] == "Win11"
|
||||
|
||||
async def test_details_by_name(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -84,7 +82,7 @@ class TestVmActions:
|
||||
}
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="details", vm_id="Win11")
|
||||
result = await tool_fn(action="vm", subaction="details", vm_id="Win11")
|
||||
assert result["uuid"] == "uuid-1"
|
||||
|
||||
async def test_details_not_found(self, _mock_graphql: AsyncMock) -> None:
|
||||
@@ -95,48 +93,48 @@ class TestVmActions:
|
||||
}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="not found"):
|
||||
await tool_fn(action="details", vm_id="nonexistent")
|
||||
await tool_fn(action="vm", subaction="details", vm_id="nonexistent")
|
||||
|
||||
async def test_start_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"start": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "start"
|
||||
assert result["subaction"] == "start"
|
||||
|
||||
async def test_force_stop(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"forceStop": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="force_stop", vm_id="uuid-1", confirm=True)
|
||||
result = await tool_fn(action="vm", subaction="force_stop", vm_id="uuid-1", confirm=True)
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "force_stop"
|
||||
assert result["subaction"] == "force_stop"
|
||||
|
||||
async def test_stop_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"stop": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="stop", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="stop", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "stop"
|
||||
assert result["subaction"] == "stop"
|
||||
|
||||
async def test_pause_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"pause": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="pause", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="pause", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "pause"
|
||||
assert result["subaction"] == "pause"
|
||||
|
||||
async def test_resume_vm(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {"resume": True}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="resume", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="resume", vm_id="uuid-1")
|
||||
assert result["success"] is True
|
||||
assert result["action"] == "resume"
|
||||
assert result["subaction"] == "resume"
|
||||
|
||||
async def test_mutation_unexpected_response(self, _mock_graphql: AsyncMock) -> None:
|
||||
_mock_graphql.return_value = {"vm": {}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to start"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
|
||||
class TestVmMutationFailures:
|
||||
@@ -147,38 +145,38 @@ class TestVmMutationFailures:
|
||||
_mock_graphql.return_value = {}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to start"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
async def test_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""VM start returning False should still succeed (the tool reports the raw value)."""
|
||||
_mock_graphql.return_value = {"vm": {"start": False}}
|
||||
tool_fn = _make_tool()
|
||||
result = await tool_fn(action="start", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
assert result["success"] is False
|
||||
assert result["action"] == "start"
|
||||
assert result["subaction"] == "start"
|
||||
|
||||
async def test_stop_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""VM stop returning None in the field should succeed (key exists, value is None)."""
|
||||
_mock_graphql.return_value = {"vm": {"stop": None}}
|
||||
tool_fn = _make_tool()
|
||||
# The check is `field in data["vm"]` — `in` checks key existence, not truthiness
|
||||
result = await tool_fn(action="stop", vm_id="uuid-1")
|
||||
result = await tool_fn(action="vm", subaction="stop", vm_id="uuid-1")
|
||||
assert result["success"] is None
|
||||
assert result["action"] == "stop"
|
||||
assert result["subaction"] == "stop"
|
||||
|
||||
async def test_force_stop_mutation_empty_vm_object(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Empty vm object with no matching field should raise ToolError."""
|
||||
_mock_graphql.return_value = {"vm": {}}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to force_stop"):
|
||||
await tool_fn(action="force_stop", vm_id="uuid-1", confirm=True)
|
||||
await tool_fn(action="vm", subaction="force_stop", vm_id="uuid-1", confirm=True)
|
||||
|
||||
async def test_reboot_mutation_vm_key_none(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""vm key being None should raise ToolError."""
|
||||
_mock_graphql.return_value = {"vm": None}
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="Failed to reboot"):
|
||||
await tool_fn(action="reboot", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="reboot", vm_id="uuid-1")
|
||||
|
||||
async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None:
|
||||
"""Mid-operation timeout should be wrapped in ToolError."""
|
||||
@@ -186,4 +184,4 @@ class TestVmMutationFailures:
|
||||
_mock_graphql.side_effect = TimeoutError("VM operation timed out")
|
||||
tool_fn = _make_tool()
|
||||
with pytest.raises(ToolError, match="timed out"):
|
||||
await tool_fn(action="start", vm_id="uuid-1")
|
||||
await tool_fn(action="vm", subaction="start", vm_id="uuid-1")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Unraid MCP Server Package.
|
||||
"""Unraid MCP Server Package."""
|
||||
|
||||
A modular MCP (Model Context Protocol) server that provides tools to interact
|
||||
with an Unraid server's GraphQL API.
|
||||
"""
|
||||
from .version import VERSION
|
||||
|
||||
__version__ = "0.2.0"
|
||||
|
||||
__version__ = VERSION
|
||||
|
||||
@@ -5,16 +5,10 @@ that cap at 10MB and start over (no rotation) for consistent use across all modu
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import pytz
|
||||
from rich.align import Align
|
||||
from rich.console import Console
|
||||
from rich.logging import RichHandler
|
||||
from rich.panel import Panel
|
||||
from rich.rule import Rule
|
||||
from rich.text import Text
|
||||
|
||||
|
||||
try:
|
||||
@@ -28,7 +22,7 @@ from .settings import LOG_FILE_PATH, LOG_LEVEL_STR
|
||||
|
||||
|
||||
# Global Rich console for consistent formatting
|
||||
console = Console(stderr=True, force_terminal=True)
|
||||
console = Console(stderr=True)
|
||||
|
||||
|
||||
class OverwriteFileHandler(logging.FileHandler):
|
||||
@@ -45,29 +39,45 @@ class OverwriteFileHandler(logging.FileHandler):
|
||||
delay: Whether to delay file opening
|
||||
"""
|
||||
self.max_bytes = max_bytes
|
||||
self._emit_count = 0
|
||||
self._check_interval = 100
|
||||
super().__init__(filename, mode, encoding, delay)
|
||||
|
||||
def emit(self, record):
|
||||
"""Emit a record, checking file size and overwriting if needed."""
|
||||
# Check file size before writing
|
||||
if self.stream and hasattr(self.stream, "name"):
|
||||
"""Emit a record, checking file size periodically and overwriting if needed."""
|
||||
self._emit_count += 1
|
||||
if (
|
||||
(self._emit_count == 1 or self._emit_count % self._check_interval == 0)
|
||||
and self.stream
|
||||
and hasattr(self.stream, "name")
|
||||
):
|
||||
try:
|
||||
base_path = Path(self.baseFilename)
|
||||
if base_path.exists():
|
||||
file_size = base_path.stat().st_size
|
||||
if file_size >= self.max_bytes:
|
||||
# Close current stream
|
||||
if self.stream:
|
||||
self.stream.close()
|
||||
|
||||
# Remove the old file and start fresh
|
||||
if base_path.exists():
|
||||
base_path.unlink()
|
||||
|
||||
# Reopen with truncate mode
|
||||
file_size = base_path.stat().st_size if base_path.exists() else 0
|
||||
if file_size >= self.max_bytes:
|
||||
old_stream = self.stream
|
||||
self.stream = None
|
||||
try:
|
||||
old_stream.close()
|
||||
base_path.unlink(missing_ok=True)
|
||||
self.stream = self._open()
|
||||
except OSError:
|
||||
# Recovery: attempt to reopen even if unlink failed
|
||||
try:
|
||||
self.stream = self._open()
|
||||
except OSError:
|
||||
# old_stream is already closed — do NOT restore it.
|
||||
# Leave self.stream = None so super().emit() skips output
|
||||
# rather than writing to a closed file descriptor.
|
||||
import sys
|
||||
|
||||
# Log a marker that the file was reset
|
||||
print(
|
||||
"WARNING: Failed to reopen log file after rotation. "
|
||||
"File logging suspended until next successful open.",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
if self.stream is not None:
|
||||
reset_record = logging.LogRecord(
|
||||
name="UnraidMCPServer.Logging",
|
||||
level=logging.INFO,
|
||||
@@ -91,6 +101,28 @@ class OverwriteFileHandler(logging.FileHandler):
|
||||
super().emit(record)
|
||||
|
||||
|
||||
def _create_shared_file_handler() -> OverwriteFileHandler:
|
||||
"""Create the single shared file handler for all loggers.
|
||||
|
||||
Returns:
|
||||
Configured OverwriteFileHandler instance
|
||||
"""
|
||||
numeric_log_level = getattr(logging, LOG_LEVEL_STR, logging.INFO)
|
||||
handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8")
|
||||
handler.setLevel(numeric_log_level)
|
||||
handler.setFormatter(
|
||||
logging.Formatter(
|
||||
"%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s"
|
||||
)
|
||||
)
|
||||
return handler
|
||||
|
||||
|
||||
# Single shared file handler — all loggers reuse this instance to avoid
|
||||
# race conditions from multiple OverwriteFileHandler instances on the same file.
|
||||
_shared_file_handler = _create_shared_file_handler()
|
||||
|
||||
|
||||
def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
|
||||
"""Set up and configure the logger with console and file handlers.
|
||||
|
||||
@@ -118,19 +150,13 @@ def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger:
|
||||
show_level=True,
|
||||
show_path=False,
|
||||
rich_tracebacks=True,
|
||||
tracebacks_show_locals=True,
|
||||
tracebacks_show_locals=False,
|
||||
)
|
||||
console_handler.setLevel(numeric_log_level)
|
||||
logger.addHandler(console_handler)
|
||||
|
||||
# File Handler with 10MB cap (overwrites instead of rotating)
|
||||
file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8")
|
||||
file_handler.setLevel(numeric_log_level)
|
||||
file_formatter = logging.Formatter(
|
||||
"%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s"
|
||||
)
|
||||
file_handler.setFormatter(file_formatter)
|
||||
logger.addHandler(file_handler)
|
||||
# Reuse the shared file handler
|
||||
logger.addHandler(_shared_file_handler)
|
||||
|
||||
return logger
|
||||
|
||||
@@ -157,59 +183,28 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None:
|
||||
show_level=True,
|
||||
show_path=False,
|
||||
rich_tracebacks=True,
|
||||
tracebacks_show_locals=True,
|
||||
tracebacks_show_locals=False,
|
||||
markup=True,
|
||||
)
|
||||
console_handler.setLevel(numeric_log_level)
|
||||
fastmcp_logger.addHandler(console_handler)
|
||||
|
||||
# File Handler with 10MB cap (overwrites instead of rotating)
|
||||
file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8")
|
||||
file_handler.setLevel(numeric_log_level)
|
||||
file_formatter = logging.Formatter(
|
||||
"%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s"
|
||||
)
|
||||
file_handler.setFormatter(file_formatter)
|
||||
fastmcp_logger.addHandler(file_handler)
|
||||
# Reuse the shared file handler
|
||||
fastmcp_logger.addHandler(_shared_file_handler)
|
||||
|
||||
fastmcp_logger.setLevel(numeric_log_level)
|
||||
|
||||
# Also configure the root logger to catch any other logs
|
||||
# Attach shared file handler to the root logger so that library/third-party
|
||||
# loggers (httpx, websockets, etc.) whose propagate=True flows up to root
|
||||
# will also be written to the log file, not just the console.
|
||||
root_logger = logging.getLogger()
|
||||
root_logger.handlers.clear()
|
||||
root_logger.propagate = False
|
||||
|
||||
# Rich Console Handler for root logger
|
||||
root_console_handler = RichHandler(
|
||||
console=console,
|
||||
show_time=True,
|
||||
show_level=True,
|
||||
show_path=False,
|
||||
rich_tracebacks=True,
|
||||
tracebacks_show_locals=True,
|
||||
markup=True,
|
||||
)
|
||||
root_console_handler.setLevel(numeric_log_level)
|
||||
root_logger.addHandler(root_console_handler)
|
||||
|
||||
# File Handler for root logger with 10MB cap (overwrites instead of rotating)
|
||||
root_file_handler = OverwriteFileHandler(
|
||||
LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8"
|
||||
)
|
||||
root_file_handler.setLevel(numeric_log_level)
|
||||
root_file_handler.setFormatter(file_formatter)
|
||||
root_logger.addHandler(root_file_handler)
|
||||
root_logger.setLevel(numeric_log_level)
|
||||
if _shared_file_handler not in root_logger.handlers:
|
||||
root_logger.addHandler(_shared_file_handler)
|
||||
|
||||
return fastmcp_logger
|
||||
|
||||
|
||||
def setup_uvicorn_logging() -> logging.Logger | None:
|
||||
"""Configure uvicorn and other third-party loggers to use Rich formatting."""
|
||||
# This function is kept for backward compatibility but now delegates to FastMCP
|
||||
return configure_fastmcp_logger_with_rich()
|
||||
|
||||
|
||||
def log_configuration_status(logger: logging.Logger) -> None:
|
||||
"""Log configuration status at startup.
|
||||
|
||||
@@ -242,97 +237,6 @@ def log_configuration_status(logger: logging.Logger) -> None:
|
||||
logger.error(f"Missing required configuration: {config['missing_config']}")
|
||||
|
||||
|
||||
# Development logging helpers for Rich formatting
|
||||
def get_est_timestamp() -> str:
|
||||
"""Get current timestamp in EST timezone with YY/MM/DD format."""
|
||||
est = pytz.timezone("US/Eastern")
|
||||
now = datetime.now(est)
|
||||
return now.strftime("%y/%m/%d %H:%M:%S")
|
||||
|
||||
|
||||
def log_header(title: str) -> None:
|
||||
"""Print a beautiful header panel with Nordic blue styling."""
|
||||
panel = Panel(
|
||||
Align.center(Text(title, style="bold white")),
|
||||
style="#5E81AC", # Nordic blue
|
||||
padding=(0, 2),
|
||||
border_style="#81A1C1", # Light Nordic blue
|
||||
)
|
||||
console.print(panel)
|
||||
|
||||
|
||||
def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0) -> None:
|
||||
"""Log a message with specific level and indentation."""
|
||||
timestamp = get_est_timestamp()
|
||||
indent_str = " " * indent
|
||||
|
||||
# Enhanced Nordic color scheme with more blues
|
||||
level_config = {
|
||||
"error": {"color": "#BF616A", "icon": "❌", "style": "bold"}, # Nordic red
|
||||
"warning": {"color": "#EBCB8B", "icon": "⚠️", "style": ""}, # Nordic yellow
|
||||
"success": {"color": "#A3BE8C", "icon": "✅", "style": "bold"}, # Nordic green
|
||||
"info": {"color": "#5E81AC", "icon": "\u2139\ufe0f", "style": "bold"}, # Nordic blue (bold)
|
||||
"status": {"color": "#81A1C1", "icon": "🔍", "style": ""}, # Light Nordic blue
|
||||
"debug": {"color": "#4C566A", "icon": "🐛", "style": ""}, # Nordic dark gray
|
||||
}
|
||||
|
||||
config = level_config.get(
|
||||
level, {"color": "#81A1C1", "icon": "•", "style": ""}
|
||||
) # Default to light Nordic blue
|
||||
|
||||
# Create beautifully formatted text
|
||||
text = Text()
|
||||
|
||||
# Timestamp with Nordic blue styling
|
||||
text.append(f"[{timestamp}]", style="#81A1C1") # Light Nordic blue for timestamps
|
||||
text.append(" ")
|
||||
|
||||
# Indentation with Nordic blue styling
|
||||
if indent > 0:
|
||||
text.append(indent_str, style="#81A1C1")
|
||||
|
||||
# Level icon (only for certain levels)
|
||||
if level in ["error", "warning", "success"]:
|
||||
# Extract emoji from message if it starts with one, to avoid duplication
|
||||
if message and len(message) > 0 and ord(message[0]) >= 0x1F600: # Emoji range
|
||||
# Message already has emoji, don't add icon
|
||||
pass
|
||||
else:
|
||||
text.append(f"{config['icon']} ", style=config["color"])
|
||||
|
||||
# Message content
|
||||
message_style = f"{config['color']} {config['style']}".strip()
|
||||
text.append(message, style=message_style)
|
||||
|
||||
console.print(text)
|
||||
|
||||
|
||||
def log_separator() -> None:
|
||||
"""Print a beautiful separator line with Nordic blue styling."""
|
||||
console.print(Rule(style="#81A1C1"))
|
||||
|
||||
|
||||
# Convenience functions for different log levels
|
||||
def log_error(message: str, indent: int = 0) -> None:
|
||||
log_with_level_and_indent(message, "error", indent)
|
||||
|
||||
|
||||
def log_warning(message: str, indent: int = 0) -> None:
|
||||
log_with_level_and_indent(message, "warning", indent)
|
||||
|
||||
|
||||
def log_success(message: str, indent: int = 0) -> None:
|
||||
log_with_level_and_indent(message, "success", indent)
|
||||
|
||||
|
||||
def log_info(message: str, indent: int = 0) -> None:
|
||||
log_with_level_and_indent(message, "info", indent)
|
||||
|
||||
|
||||
def log_status(message: str, indent: int = 0) -> None:
|
||||
log_with_level_and_indent(message, "status", indent)
|
||||
|
||||
|
||||
# Global logger instance - modules can import this directly
|
||||
if FASTMCP_AVAILABLE:
|
||||
# Use FastMCP logger with Rich formatting
|
||||
@@ -341,5 +245,3 @@ if FASTMCP_AVAILABLE:
|
||||
else:
|
||||
# Fallback to our custom logger if FastMCP is not available
|
||||
logger = setup_logger()
|
||||
# Setup uvicorn logging when module is imported
|
||||
setup_uvicorn_logging()
|
||||
|
||||
@@ -10,19 +10,28 @@ from typing import Any
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from ..version import VERSION as APP_VERSION
|
||||
|
||||
|
||||
# Get the script directory (config module location)
|
||||
SCRIPT_DIR = Path(__file__).parent # /home/user/code/unraid-mcp/unraid_mcp/config/
|
||||
UNRAID_MCP_DIR = SCRIPT_DIR.parent # /home/user/code/unraid-mcp/unraid_mcp/
|
||||
PROJECT_ROOT = UNRAID_MCP_DIR.parent # /home/user/code/unraid-mcp/
|
||||
|
||||
# Canonical credentials directory — version-agnostic, survives plugin version bumps.
|
||||
# Override with UNRAID_CREDENTIALS_DIR env var (useful for containers).
|
||||
CREDENTIALS_DIR = Path(os.getenv("UNRAID_CREDENTIALS_DIR", str(Path.home() / ".unraid-mcp")))
|
||||
CREDENTIALS_ENV_PATH = CREDENTIALS_DIR / ".env"
|
||||
|
||||
# Load environment variables from .env file
|
||||
# In container: First try /app/.env.local (mounted), then project root .env
|
||||
# Priority: canonical ~/.unraid-mcp/.env first, then dev/container fallbacks.
|
||||
dotenv_paths = [
|
||||
Path("/app/.env.local"), # Container mount point
|
||||
PROJECT_ROOT / ".env.local", # Project root .env.local
|
||||
PROJECT_ROOT / ".env", # Project root .env
|
||||
UNRAID_MCP_DIR / ".env", # Local .env in unraid_mcp/
|
||||
CREDENTIALS_ENV_PATH, # primary — ~/.unraid-mcp/.env (all runtimes)
|
||||
CREDENTIALS_DIR / ".env.local", # only used if ~/.unraid-mcp/.env absent
|
||||
Path("/app/.env.local"), # Docker compat mount
|
||||
PROJECT_ROOT / ".env.local", # dev overrides
|
||||
PROJECT_ROOT / ".env", # dev fallback
|
||||
UNRAID_MCP_DIR / ".env", # last resort
|
||||
]
|
||||
|
||||
for dotenv_path in dotenv_paths:
|
||||
@@ -30,16 +39,32 @@ for dotenv_path in dotenv_paths:
|
||||
load_dotenv(dotenv_path=dotenv_path)
|
||||
break
|
||||
|
||||
# Application Version
|
||||
VERSION = "0.2.0"
|
||||
|
||||
# Core API Configuration
|
||||
UNRAID_API_URL = os.getenv("UNRAID_API_URL")
|
||||
UNRAID_API_KEY = os.getenv("UNRAID_API_KEY")
|
||||
|
||||
|
||||
# Server Configuration
|
||||
UNRAID_MCP_PORT = int(os.getenv("UNRAID_MCP_PORT", "6970"))
|
||||
UNRAID_MCP_HOST = os.getenv("UNRAID_MCP_HOST", "0.0.0.0")
|
||||
def _parse_port(env_var: str, default: int) -> int:
|
||||
"""Parse a port number from environment variable with validation."""
|
||||
raw = os.getenv(env_var, str(default))
|
||||
try:
|
||||
port = int(raw)
|
||||
except ValueError:
|
||||
import sys
|
||||
|
||||
print(f"FATAL: {env_var}={raw!r} is not a valid integer port number", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
if not (1 <= port <= 65535):
|
||||
import sys
|
||||
|
||||
print(f"FATAL: {env_var}={port} outside valid port range 1-65535", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return port
|
||||
|
||||
|
||||
UNRAID_MCP_PORT = _parse_port("UNRAID_MCP_PORT", 6970)
|
||||
UNRAID_MCP_HOST = os.getenv("UNRAID_MCP_HOST", "0.0.0.0") # noqa: S104 — intentional for Docker
|
||||
UNRAID_MCP_TRANSPORT = os.getenv("UNRAID_MCP_TRANSPORT", "streamable-http").lower()
|
||||
|
||||
# SSL Configuration
|
||||
@@ -54,11 +79,18 @@ else: # Path to CA bundle
|
||||
# Logging Configuration
|
||||
LOG_LEVEL_STR = os.getenv("UNRAID_MCP_LOG_LEVEL", "INFO").upper()
|
||||
LOG_FILE_NAME = os.getenv("UNRAID_MCP_LOG_FILE", "unraid-mcp.log")
|
||||
LOGS_DIR = Path("/tmp")
|
||||
# Use /.dockerenv as the container indicator for robust Docker detection.
|
||||
IS_DOCKER = Path("/.dockerenv").exists()
|
||||
LOGS_DIR = Path("/app/logs") if IS_DOCKER else PROJECT_ROOT / "logs"
|
||||
LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME
|
||||
|
||||
# Ensure logs directory exists
|
||||
LOGS_DIR.mkdir(parents=True, exist_ok=True)
|
||||
# Ensure logs directory exists; if creation fails, fall back to PROJECT_ROOT / ".cache" / "logs".
|
||||
try:
|
||||
LOGS_DIR.mkdir(parents=True, exist_ok=True)
|
||||
except OSError:
|
||||
LOGS_DIR = PROJECT_ROOT / ".cache" / "logs"
|
||||
LOGS_DIR.mkdir(parents=True, exist_ok=True)
|
||||
LOG_FILE_PATH = LOGS_DIR / LOG_FILE_NAME
|
||||
|
||||
# HTTP Client Configuration
|
||||
TIMEOUT_CONFIG = {
|
||||
@@ -83,6 +115,24 @@ def validate_required_config() -> tuple[bool, list[str]]:
|
||||
return len(missing) == 0, missing
|
||||
|
||||
|
||||
def is_configured() -> bool:
|
||||
"""Return True if both required credentials are present."""
|
||||
return bool(UNRAID_API_URL and UNRAID_API_KEY)
|
||||
|
||||
|
||||
def apply_runtime_config(api_url: str, api_key: str) -> None:
|
||||
"""Update module-level credential globals at runtime (post-elicitation).
|
||||
|
||||
Also sets matching environment variables so submodules that read
|
||||
os.getenv() after import see the new values.
|
||||
"""
|
||||
global UNRAID_API_URL, UNRAID_API_KEY
|
||||
UNRAID_API_URL = api_url
|
||||
UNRAID_API_KEY = api_key
|
||||
os.environ["UNRAID_API_URL"] = api_url
|
||||
os.environ["UNRAID_API_KEY"] = api_key
|
||||
|
||||
|
||||
def get_config_summary() -> dict[str, Any]:
|
||||
"""Get a summary of current configuration (safe for logging).
|
||||
|
||||
@@ -91,9 +141,11 @@ def get_config_summary() -> dict[str, Any]:
|
||||
"""
|
||||
is_valid, missing = validate_required_config()
|
||||
|
||||
from ..core.utils import safe_display_url
|
||||
|
||||
return {
|
||||
"api_url_configured": bool(UNRAID_API_URL),
|
||||
"api_url_preview": UNRAID_API_URL[:20] + "..." if UNRAID_API_URL else None,
|
||||
"api_url_preview": safe_display_url(UNRAID_API_URL) if UNRAID_API_URL else None,
|
||||
"api_key_configured": bool(UNRAID_API_KEY),
|
||||
"server_host": UNRAID_MCP_HOST,
|
||||
"server_port": UNRAID_MCP_PORT,
|
||||
@@ -104,3 +156,7 @@ def get_config_summary() -> dict[str, Any]:
|
||||
"config_valid": is_valid,
|
||||
"missing_config": missing if not is_valid else None,
|
||||
}
|
||||
|
||||
|
||||
# Re-export application version from a single source of truth.
|
||||
VERSION = APP_VERSION
|
||||
|
||||
@@ -5,24 +5,40 @@ to the Unraid API with proper timeout handling and error management.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Any
|
||||
import re
|
||||
import time
|
||||
from typing import Any, Final
|
||||
|
||||
import httpx
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import (
|
||||
TIMEOUT_CONFIG,
|
||||
UNRAID_API_KEY,
|
||||
UNRAID_API_URL,
|
||||
UNRAID_VERIFY_SSL,
|
||||
VERSION,
|
||||
)
|
||||
from ..core.exceptions import ToolError
|
||||
from ..core.exceptions import CredentialsNotConfiguredError, ToolError
|
||||
from .utils import safe_display_url
|
||||
|
||||
|
||||
# Sensitive keys to redact from debug logs
|
||||
_SENSITIVE_KEYS = {"password", "key", "secret", "token", "apikey"}
|
||||
# Sensitive keys to redact from debug logs (frozenset — immutable, Final — no accidental reassignment)
|
||||
_SENSITIVE_KEYS: Final[frozenset[str]] = frozenset(
|
||||
{
|
||||
"password",
|
||||
"key",
|
||||
"secret",
|
||||
"token",
|
||||
"apikey",
|
||||
"authorization",
|
||||
"cookie",
|
||||
"session",
|
||||
"credential",
|
||||
"passphrase",
|
||||
"jwt",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _is_sensitive_key(key: str) -> bool:
|
||||
@@ -31,14 +47,12 @@ def _is_sensitive_key(key: str) -> bool:
|
||||
return any(s in key_lower for s in _SENSITIVE_KEYS)
|
||||
|
||||
|
||||
def _redact_sensitive(obj: Any) -> Any:
|
||||
def redact_sensitive(obj: Any) -> Any:
|
||||
"""Recursively redact sensitive values from nested dicts/lists."""
|
||||
if isinstance(obj, dict):
|
||||
return {
|
||||
k: ("***" if _is_sensitive_key(k) else _redact_sensitive(v)) for k, v in obj.items()
|
||||
}
|
||||
return {k: ("***" if _is_sensitive_key(k) else redact_sensitive(v)) for k, v in obj.items()}
|
||||
if isinstance(obj, list):
|
||||
return [_redact_sensitive(item) for item in obj]
|
||||
return [redact_sensitive(item) for item in obj]
|
||||
return obj
|
||||
|
||||
|
||||
@@ -66,8 +80,128 @@ def get_timeout_for_operation(profile: str) -> httpx.Timeout:
|
||||
|
||||
|
||||
# Global connection pool (module-level singleton)
|
||||
# Python 3.12+ asyncio.Lock() is safe at module level — no running event loop required
|
||||
_http_client: httpx.AsyncClient | None = None
|
||||
_client_lock = asyncio.Lock()
|
||||
_client_lock: Final[asyncio.Lock] = asyncio.Lock()
|
||||
|
||||
|
||||
class _RateLimiter:
|
||||
"""Token bucket rate limiter for Unraid API (100 req / 10s hard limit).
|
||||
|
||||
Uses 90 tokens with 9.0 tokens/sec refill for 10% safety headroom.
|
||||
"""
|
||||
|
||||
def __init__(self, max_tokens: int = 90, refill_rate: float = 9.0) -> None:
|
||||
self.max_tokens = max_tokens
|
||||
self.tokens = float(max_tokens)
|
||||
self.refill_rate = refill_rate # tokens per second
|
||||
self.last_refill = time.monotonic()
|
||||
# asyncio.Lock() is safe to create at __init__ time (Python 3.12+)
|
||||
self._lock: Final[asyncio.Lock] = asyncio.Lock()
|
||||
|
||||
def _refill(self) -> None:
|
||||
"""Refill tokens based on elapsed time."""
|
||||
now = time.monotonic()
|
||||
elapsed = now - self.last_refill
|
||||
self.tokens = min(self.max_tokens, self.tokens + elapsed * self.refill_rate)
|
||||
self.last_refill = now
|
||||
|
||||
async def acquire(self) -> None:
|
||||
"""Consume one token, waiting if necessary for refill."""
|
||||
while True:
|
||||
async with self._lock:
|
||||
self._refill()
|
||||
if self.tokens >= 1:
|
||||
self.tokens -= 1
|
||||
return
|
||||
wait_time = (1 - self.tokens) / self.refill_rate
|
||||
|
||||
# Sleep outside the lock so other coroutines aren't blocked
|
||||
await asyncio.sleep(wait_time)
|
||||
|
||||
|
||||
_rate_limiter = _RateLimiter()
|
||||
|
||||
|
||||
# --- TTL Cache for stable read-only queries ---
|
||||
|
||||
# Queries whose results change infrequently and are safe to cache.
|
||||
# Mutations and volatile queries (metrics, docker, array state) are excluded.
|
||||
_CACHEABLE_QUERY_PREFIXES = frozenset(
|
||||
{
|
||||
"GetNetworkConfig",
|
||||
"GetRegistrationInfo",
|
||||
"GetOwner",
|
||||
"GetFlash",
|
||||
}
|
||||
)
|
||||
|
||||
_CACHE_TTL_SECONDS = 60.0
|
||||
_OPERATION_NAME_PATTERN = re.compile(r"^(?:query\s+)?([_A-Za-z][_0-9A-Za-z]*)\b")
|
||||
|
||||
|
||||
class _QueryCache:
|
||||
"""Simple TTL cache for GraphQL query responses.
|
||||
|
||||
Keyed by a hash of (query, variables). Entries expire after _CACHE_TTL_SECONDS.
|
||||
Only caches responses for queries whose operation name is in _CACHEABLE_QUERY_PREFIXES.
|
||||
Mutation requests always bypass the cache.
|
||||
|
||||
Thread-safe via asyncio.Lock. Bounded to _MAX_ENTRIES with FIFO eviction (oldest
|
||||
expiry timestamp evicted first when the store is full).
|
||||
"""
|
||||
|
||||
_MAX_ENTRIES: Final[int] = 256
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._store: dict[str, tuple[float, dict[str, Any]]] = {}
|
||||
self._lock: Final[asyncio.Lock] = asyncio.Lock()
|
||||
|
||||
@staticmethod
|
||||
def _cache_key(query: str, variables: dict[str, Any] | None) -> str:
|
||||
raw = query + json.dumps(variables or {}, sort_keys=True)
|
||||
return hashlib.sha256(raw.encode()).hexdigest()
|
||||
|
||||
@staticmethod
|
||||
def is_cacheable(query: str) -> bool:
|
||||
"""Check if a query is eligible for caching based on its operation name."""
|
||||
normalized = query.lstrip()
|
||||
if normalized.startswith("mutation"):
|
||||
return False
|
||||
match = _OPERATION_NAME_PATTERN.match(normalized)
|
||||
if not match:
|
||||
return False
|
||||
return match.group(1) in _CACHEABLE_QUERY_PREFIXES
|
||||
|
||||
async def get(self, query: str, variables: dict[str, Any] | None) -> dict[str, Any] | None:
|
||||
"""Return cached result if present and not expired, else None."""
|
||||
async with self._lock:
|
||||
key = self._cache_key(query, variables)
|
||||
entry = self._store.get(key)
|
||||
if entry is None:
|
||||
return None
|
||||
expires_at, data = entry
|
||||
if time.monotonic() > expires_at:
|
||||
del self._store[key]
|
||||
return None
|
||||
return data
|
||||
|
||||
async def put(self, query: str, variables: dict[str, Any] | None, data: dict[str, Any]) -> None:
|
||||
"""Store a query result with TTL expiry, evicting oldest entry if at capacity."""
|
||||
async with self._lock:
|
||||
if len(self._store) >= self._MAX_ENTRIES:
|
||||
oldest_key = min(self._store, key=lambda k: self._store[k][0])
|
||||
del self._store[oldest_key]
|
||||
key = self._cache_key(query, variables)
|
||||
self._store[key] = (time.monotonic() + _CACHE_TTL_SECONDS, data)
|
||||
|
||||
async def invalidate_all(self) -> None:
|
||||
"""Clear the entire cache (called after mutations)."""
|
||||
async with self._lock:
|
||||
self._store.clear()
|
||||
|
||||
|
||||
_query_cache = _QueryCache()
|
||||
|
||||
|
||||
def is_idempotent_error(error_message: str, operation: str) -> bool:
|
||||
@@ -109,7 +243,7 @@ async def _create_http_client() -> httpx.AsyncClient:
|
||||
return httpx.AsyncClient(
|
||||
# Connection pool settings
|
||||
limits=httpx.Limits(
|
||||
max_keepalive_connections=20, max_connections=100, keepalive_expiry=30.0
|
||||
max_keepalive_connections=20, max_connections=20, keepalive_expiry=30.0
|
||||
),
|
||||
# Default timeout (can be overridden per-request)
|
||||
timeout=DEFAULT_TIMEOUT,
|
||||
@@ -123,33 +257,28 @@ async def _create_http_client() -> httpx.AsyncClient:
|
||||
async def get_http_client() -> httpx.AsyncClient:
|
||||
"""Get or create shared HTTP client with connection pooling.
|
||||
|
||||
The client is protected by an asyncio lock to prevent concurrent creation.
|
||||
If the existing client was closed (e.g., during shutdown), a new one is created.
|
||||
Uses double-checked locking: fast-path skips the lock when the client
|
||||
is already initialized, only acquiring it for initial creation or
|
||||
recovery after close.
|
||||
|
||||
Returns:
|
||||
Singleton AsyncClient instance with connection pooling enabled
|
||||
"""
|
||||
global _http_client
|
||||
|
||||
# Fast-path: skip lock if client is already initialized and open
|
||||
client = _http_client
|
||||
if client is not None and not client.is_closed:
|
||||
return client
|
||||
|
||||
# Slow-path: acquire lock for initialization
|
||||
async with _client_lock:
|
||||
if _http_client is None or _http_client.is_closed:
|
||||
_http_client = await _create_http_client()
|
||||
logger.info(
|
||||
"Created shared HTTP client with connection pooling (20 keepalive, 100 max connections)"
|
||||
"Created shared HTTP client with connection pooling (20 keepalive, 20 max connections)"
|
||||
)
|
||||
|
||||
client = _http_client
|
||||
|
||||
# Verify client is still open after releasing the lock.
|
||||
# In asyncio's cooperative model this is unlikely to fail, but guards
|
||||
# against edge cases where close_http_client runs between yield points.
|
||||
if client.is_closed:
|
||||
async with _client_lock:
|
||||
_http_client = await _create_http_client()
|
||||
client = _http_client
|
||||
logger.info("Re-created HTTP client after unexpected close")
|
||||
|
||||
return client
|
||||
return _http_client
|
||||
|
||||
|
||||
async def close_http_client() -> None:
|
||||
@@ -182,39 +311,71 @@ async def make_graphql_request(
|
||||
Dict containing the GraphQL response data
|
||||
|
||||
Raises:
|
||||
CredentialsNotConfiguredError: When UNRAID_API_URL or UNRAID_API_KEY are absent at call time
|
||||
ToolError: For HTTP errors, network errors, or non-idempotent GraphQL errors
|
||||
"""
|
||||
if not UNRAID_API_URL:
|
||||
raise ToolError("UNRAID_API_URL not configured")
|
||||
# Local import to get current runtime values — module-level names are captured at import time
|
||||
# and won't reflect runtime changes (e.g., after elicitation sets them via apply_runtime_config).
|
||||
from ..config import settings as _settings
|
||||
|
||||
if not UNRAID_API_KEY:
|
||||
raise ToolError("UNRAID_API_KEY not configured")
|
||||
if not _settings.UNRAID_API_URL or not _settings.UNRAID_API_KEY:
|
||||
raise CredentialsNotConfiguredError()
|
||||
|
||||
# Check TTL cache — short-circuits rate limiter on hits
|
||||
is_mutation = query.lstrip().startswith("mutation")
|
||||
if not is_mutation and _query_cache.is_cacheable(query):
|
||||
cached = await _query_cache.get(query, variables)
|
||||
if cached is not None:
|
||||
logger.debug("Returning cached response for query")
|
||||
return cached
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": UNRAID_API_KEY,
|
||||
"X-API-Key": _settings.UNRAID_API_KEY,
|
||||
}
|
||||
|
||||
payload: dict[str, Any] = {"query": query}
|
||||
if variables:
|
||||
payload["variables"] = variables
|
||||
|
||||
logger.debug(f"Making GraphQL request to {UNRAID_API_URL}:")
|
||||
logger.debug(f"Making GraphQL request to {safe_display_url(_settings.UNRAID_API_URL)}:")
|
||||
logger.debug(f"Query: {query[:200]}{'...' if len(query) > 200 else ''}") # Log truncated query
|
||||
if variables:
|
||||
logger.debug(f"Variables: {_redact_sensitive(variables)}")
|
||||
logger.debug(f"Variables: {redact_sensitive(variables)}")
|
||||
|
||||
try:
|
||||
# Rate limit: consume a token before making the request
|
||||
await _rate_limiter.acquire()
|
||||
|
||||
# Get the shared HTTP client with connection pooling
|
||||
client = await get_http_client()
|
||||
|
||||
# Override timeout if custom timeout specified
|
||||
# Retry loop for 429 rate limit responses
|
||||
post_kwargs: dict[str, Any] = {"json": payload, "headers": headers}
|
||||
if custom_timeout is not None:
|
||||
response = await client.post(
|
||||
UNRAID_API_URL, json=payload, headers=headers, timeout=custom_timeout
|
||||
post_kwargs["timeout"] = custom_timeout
|
||||
|
||||
response: httpx.Response | None = None
|
||||
for attempt in range(3):
|
||||
response = await client.post(_settings.UNRAID_API_URL, **post_kwargs)
|
||||
if response.status_code == 429:
|
||||
backoff = 2**attempt
|
||||
logger.warning(
|
||||
f"Rate limited (429) by Unraid API, retrying in {backoff}s (attempt {attempt + 1}/3)"
|
||||
)
|
||||
await asyncio.sleep(backoff)
|
||||
continue
|
||||
break
|
||||
|
||||
if response is None: # pragma: no cover — guaranteed by loop
|
||||
raise ToolError("No response received after retry attempts")
|
||||
|
||||
# Provide a clear message when all retries are exhausted on 429
|
||||
if response.status_code == 429:
|
||||
logger.error("Rate limit (429) persisted after 3 retries — request aborted")
|
||||
raise ToolError(
|
||||
"Unraid API is rate limiting requests. Wait ~10 seconds before retrying."
|
||||
)
|
||||
else:
|
||||
response = await client.post(UNRAID_API_URL, json=payload, headers=headers)
|
||||
|
||||
response.raise_for_status() # Raise an exception for HTTP error codes 4xx/5xx
|
||||
|
||||
@@ -245,14 +406,27 @@ async def make_graphql_request(
|
||||
|
||||
logger.debug("GraphQL request successful.")
|
||||
data = response_data.get("data", {})
|
||||
return data if isinstance(data, dict) else {} # Ensure we return dict
|
||||
result = data if isinstance(data, dict) else {} # Ensure we return dict
|
||||
|
||||
# Invalidate cache on mutations; cache eligible query results
|
||||
if is_mutation:
|
||||
await _query_cache.invalidate_all()
|
||||
elif _query_cache.is_cacheable(query):
|
||||
await _query_cache.put(query, variables, result)
|
||||
|
||||
return result
|
||||
|
||||
except httpx.HTTPStatusError as e:
|
||||
# Log full details internally; only expose status code to MCP client
|
||||
logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
|
||||
raise ToolError(f"HTTP error {e.response.status_code}: {e.response.text}") from e
|
||||
raise ToolError(
|
||||
f"Unraid API returned HTTP {e.response.status_code}. Check server logs for details."
|
||||
) from e
|
||||
except httpx.RequestError as e:
|
||||
# Log full error internally; give safe summary to MCP client
|
||||
logger.error(f"Request error occurred: {e}")
|
||||
raise ToolError(f"Network connection error: {e!s}") from e
|
||||
raise ToolError(f"Network error connecting to Unraid API: {type(e).__name__}") from e
|
||||
except json.JSONDecodeError as e:
|
||||
# Log full decode error; give safe summary to MCP client
|
||||
logger.error(f"Failed to decode JSON response: {e}")
|
||||
raise ToolError(f"Invalid JSON response from Unraid API: {e!s}") from e
|
||||
raise ToolError("Unraid API returned an invalid response (not valid JSON)") from e
|
||||
|
||||
@@ -4,6 +4,10 @@ This module defines custom exception classes for consistent error handling
|
||||
throughout the application, with proper integration to FastMCP's error system.
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
from collections.abc import Iterator
|
||||
|
||||
from fastmcp.exceptions import ToolError as FastMCPToolError
|
||||
|
||||
|
||||
@@ -19,36 +23,55 @@ class ToolError(FastMCPToolError):
|
||||
pass
|
||||
|
||||
|
||||
class ConfigurationError(ToolError):
|
||||
"""Raised when there are configuration-related errors."""
|
||||
class CredentialsNotConfiguredError(Exception):
|
||||
"""Raised when UNRAID_API_URL or UNRAID_API_KEY are not set.
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class UnraidAPIError(ToolError):
|
||||
"""Raised when the Unraid API returns an error or is unreachable."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class SubscriptionError(ToolError):
|
||||
"""Raised when there are WebSocket subscription-related errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ValidationError(ToolError):
|
||||
"""Raised when input validation fails."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IdempotentOperationError(ToolError):
|
||||
"""Raised when an operation is idempotent (already in desired state).
|
||||
|
||||
This is used internally to signal that an operation was already complete,
|
||||
which should typically be converted to a success response rather than
|
||||
propagated as an error to the user.
|
||||
Used as a sentinel to trigger elicitation rather than a hard crash.
|
||||
"""
|
||||
|
||||
pass
|
||||
def __str__(self) -> str:
|
||||
return "Unraid credentials are not configured."
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def tool_error_handler(
|
||||
tool_name: str,
|
||||
action: str,
|
||||
logger: logging.Logger,
|
||||
) -> Iterator[None]:
|
||||
"""Context manager that standardizes tool error handling.
|
||||
|
||||
Re-raises ToolError as-is. Converts CredentialsNotConfiguredError to a ToolError
|
||||
with setup instructions including CREDENTIALS_ENV_PATH; does not log.
|
||||
Gives TimeoutError a descriptive message. Catches all other exceptions,
|
||||
logs them with full traceback, and wraps them in ToolError.
|
||||
|
||||
Args:
|
||||
tool_name: The tool name for error messages (e.g., "docker", "vm").
|
||||
action: The current action being executed.
|
||||
logger: The logger instance to use for error logging.
|
||||
"""
|
||||
try:
|
||||
yield
|
||||
except ToolError:
|
||||
raise
|
||||
except CredentialsNotConfiguredError as e:
|
||||
from ..config.settings import CREDENTIALS_ENV_PATH
|
||||
|
||||
raise ToolError(
|
||||
f"Credentials not configured. Run unraid_health action=setup, "
|
||||
f"or create {CREDENTIALS_ENV_PATH} with UNRAID_API_URL and UNRAID_API_KEY "
|
||||
f"(cp .env.example {CREDENTIALS_ENV_PATH} to get started)."
|
||||
) from e
|
||||
except TimeoutError as e:
|
||||
logger.exception(
|
||||
f"Timeout in unraid_{tool_name} action={action}: request exceeded time limit"
|
||||
)
|
||||
raise ToolError(
|
||||
f"Request timed out executing {tool_name}/{action}. The Unraid API did not respond in time."
|
||||
) from e
|
||||
except Exception as e:
|
||||
logger.exception(f"Error in unraid_{tool_name} action={action}")
|
||||
raise ToolError(
|
||||
f"Failed to execute {tool_name}/{action}. Check server logs for details."
|
||||
) from e
|
||||
|
||||
110
unraid_mcp/core/guards.py
Normal file
110
unraid_mcp/core/guards.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Destructive action gating via MCP elicitation.
|
||||
|
||||
Provides gate_destructive_action() — a single call to guard any destructive
|
||||
tool action with interactive user confirmation or confirm=True bypass.
|
||||
"""
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastmcp import Context
|
||||
|
||||
from ..config.logging import logger
|
||||
from .exceptions import ToolError
|
||||
|
||||
|
||||
class _ConfirmAction(BaseModel):
|
||||
confirmed: bool = Field(False, description="Check the box to confirm and proceed")
|
||||
|
||||
|
||||
async def elicit_destructive_confirmation(
|
||||
ctx: "Context | None", action: str, description: str
|
||||
) -> bool:
|
||||
"""Prompt the user to confirm a destructive action via MCP elicitation.
|
||||
|
||||
Args:
|
||||
ctx: The MCP context. If None, returns False immediately.
|
||||
action: Action name shown in the prompt.
|
||||
description: Human-readable description of what the action will do.
|
||||
|
||||
Returns:
|
||||
True if the user confirmed, False otherwise.
|
||||
"""
|
||||
if ctx is None:
|
||||
logger.warning(
|
||||
"Cannot elicit confirmation for '%s': no MCP context available. "
|
||||
"Re-run with confirm=True to bypass elicitation.",
|
||||
action,
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
result = await ctx.elicit(
|
||||
message=(
|
||||
f"**Confirm destructive action: `{action}`**\n\n"
|
||||
f"{description}\n\n"
|
||||
"Are you sure you want to proceed?"
|
||||
),
|
||||
response_type=_ConfirmAction,
|
||||
)
|
||||
except NotImplementedError:
|
||||
logger.warning(
|
||||
"MCP client does not support elicitation for action '%s'. "
|
||||
"Re-run with confirm=True to bypass.",
|
||||
action,
|
||||
)
|
||||
return False
|
||||
|
||||
if result.action != "accept":
|
||||
logger.info("Destructive action '%s' declined by user (%s).", action, result.action)
|
||||
return False
|
||||
|
||||
confirmed: bool = result.data.confirmed # type: ignore[union-attr]
|
||||
if not confirmed:
|
||||
logger.info("Destructive action '%s' not confirmed by user.", action)
|
||||
return confirmed
|
||||
|
||||
|
||||
async def gate_destructive_action(
|
||||
ctx: "Context | None",
|
||||
action: str,
|
||||
destructive_actions: set[str],
|
||||
confirm: bool,
|
||||
description: str | dict[str, str],
|
||||
) -> None:
|
||||
"""Gate a destructive action with elicitation or confirm=True bypass.
|
||||
|
||||
Does nothing if the action is not in destructive_actions or confirm=True.
|
||||
Otherwise calls elicit_destructive_confirmation; raises ToolError if the
|
||||
user declines or elicitation is unavailable.
|
||||
|
||||
Args:
|
||||
ctx: MCP context for elicitation (None skips elicitation).
|
||||
action: The action being requested.
|
||||
destructive_actions: Set of action names considered destructive.
|
||||
confirm: When True, bypasses elicitation and proceeds immediately.
|
||||
description: Human-readable description of the action's impact.
|
||||
Pass a str when one description covers all destructive actions.
|
||||
Pass a dict[action_name, description] when descriptions differ.
|
||||
"""
|
||||
if action not in destructive_actions:
|
||||
return
|
||||
|
||||
if confirm:
|
||||
logger.info("Destructive action '%s' bypassed via confirm=True.", action)
|
||||
return
|
||||
|
||||
if isinstance(description, dict):
|
||||
desc = description.get(action)
|
||||
if desc is None:
|
||||
raise ToolError(f"Missing destructive-action description for '{action}'.")
|
||||
else:
|
||||
desc = description
|
||||
confirmed = await elicit_destructive_confirmation(ctx, action, desc)
|
||||
if not confirmed:
|
||||
raise ToolError(
|
||||
f"Action '{action}' was not confirmed. Re-run with confirm=True to bypass elicitation."
|
||||
)
|
||||
163
unraid_mcp/core/setup.py
Normal file
163
unraid_mcp/core/setup.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""Interactive credential setup via MCP elicitation.
|
||||
|
||||
When UNRAID_API_URL or UNRAID_API_KEY are absent, tools call
|
||||
`elicit_and_configure(ctx)` to collect them from the user and persist
|
||||
them to ~/.unraid-mcp/.env with restricted permissions.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastmcp import Context
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import (
|
||||
CREDENTIALS_DIR,
|
||||
CREDENTIALS_ENV_PATH,
|
||||
PROJECT_ROOT,
|
||||
apply_runtime_config,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
class _UnraidCredentials:
    """Schema for the credential elicitation form.

    Passed as ``response_type`` to ``ctx.elicit`` in elicit_and_configure();
    the MCP client renders one input per field.
    """

    # Unraid GraphQL endpoint URL (trailing slash is stripped on accept).
    api_url: str
    # Unraid API key (surrounding whitespace is stripped on accept).
    api_key: str
|
||||
|
||||
|
||||
async def elicit_reset_confirmation(ctx: Context | None, current_url: str) -> bool:
    """Ask the user whether existing credentials should be overwritten.

    Args:
        ctx: MCP context used to elicit the answer. None means no prompt is
            possible, so the reset is refused.
        current_url: Currently configured URL and status, shown for context.

    Returns:
        True when the reset should proceed, False otherwise.
    """
    if ctx is None:
        return False

    prompt = (
        "Credentials are already configured.\n\n"
        f"**Current URL:** `{current_url}`\n\n"
        "Do you want to reset your API URL and key?"
    )
    try:
        result = await ctx.elicit(message=prompt, response_type=bool)
    except NotImplementedError:
        # Client doesn't support elicitation — treat as "proceed with reset" so
        # non-interactive clients (stdio, CI) are not permanently blocked from
        # reconfiguring credentials.
        logger.warning(
            "MCP client does not support elicitation for reset confirmation — proceeding with reset."
        )
        return True

    if result.action == "accept":
        answer: bool = result.data  # type: ignore[union-attr]
        return answer

    logger.info("Credential reset declined by user (%s).", result.action)
    return False
|
||||
|
||||
|
||||
async def elicit_and_configure(ctx: Context | None) -> bool:
    """Prompt the user for Unraid credentials via MCP elicitation.

    Accepted credentials are persisted to CREDENTIALS_ENV_PATH and applied
    to the running process through apply_runtime_config().

    Args:
        ctx: MCP context used to elicit input. None means there is no way
            to prompt the user, so False is returned immediately.

    Returns:
        True when credentials were accepted and applied; False when the user
        declined/cancelled or the MCP client cannot do elicitation.
    """
    if ctx is None:
        logger.warning(
            "Cannot elicit credentials: no MCP context available. "
            "Run unraid(action=health, subaction=setup) to configure credentials."
        )
        return False

    prompt = (
        "Unraid MCP needs your Unraid server credentials to connect.\n\n"
        "• **API URL**: Your Unraid GraphQL endpoint "
        "(e.g. `https://10-1-0-2.xxx.myunraid.net:31337`)\n"
        "• **API Key**: Found in Unraid → Settings → Management Access → API Keys"
    )
    try:
        result = await ctx.elicit(message=prompt, response_type=_UnraidCredentials)
    except NotImplementedError:
        logger.warning(
            "MCP client does not support elicitation. "
            "Use unraid(action=health, subaction=setup) or create %s manually.",
            CREDENTIALS_ENV_PATH,
        )
        return False

    if result.action != "accept":
        logger.warning("Credential elicitation %s — server remains unconfigured.", result.action)
        return False

    # Normalize before persisting: no trailing slash on the URL, no stray
    # whitespace around the key.
    url: str = result.data.api_url.rstrip("/")  # type: ignore[union-attr]
    key: str = result.data.api_key.strip()  # type: ignore[union-attr]

    _write_env(url, key)
    apply_runtime_config(url, key)

    logger.info("Credentials configured via elicitation and persisted to %s.", CREDENTIALS_ENV_PATH)
    return True
|
||||
|
||||
|
||||
def _write_env(api_url: str, api_key: str) -> None:
    """Write or update credentials in CREDENTIALS_ENV_PATH.

    Creates CREDENTIALS_DIR (mode 700) if needed. On first run, seeds from
    .env.example to preserve comments and structure. The file is created with
    mode 600 *at open time*, so the API key is never exposed through a
    default-umask window between write and a later chmod (CWE-378).

    Args:
        api_url: Normalized Unraid GraphQL endpoint URL.
        api_key: Unraid API key (secret).
    """
    # Ensure directory exists with restricted permissions (chmod after to bypass umask)
    CREDENTIALS_DIR.mkdir(parents=True, exist_ok=True)
    CREDENTIALS_DIR.chmod(0o700)

    if CREDENTIALS_ENV_PATH.exists():
        template_lines = CREDENTIALS_ENV_PATH.read_text().splitlines()
    else:
        example_path = PROJECT_ROOT / ".env.example"
        template_lines = example_path.read_text().splitlines() if example_path.exists() else []

    # Replace credentials in-place; append at end if not found in template
    url_written = False
    key_written = False
    new_lines: list[str] = []
    for line in template_lines:
        stripped = line.strip()
        if stripped.startswith("UNRAID_API_URL="):
            new_lines.append(f"UNRAID_API_URL={api_url}")
            url_written = True
        elif stripped.startswith("UNRAID_API_KEY="):
            new_lines.append(f"UNRAID_API_KEY={api_key}")
            key_written = True
        else:
            new_lines.append(line)

    # If not found in template (empty or missing keys), append at end
    if not url_written:
        new_lines.append(f"UNRAID_API_URL={api_url}")
    if not key_written:
        new_lines.append(f"UNRAID_API_KEY={api_key}")

    # Open with O_CREAT mode 0o600 so a freshly created file is never group/
    # world-readable, even transiently. The chmod afterwards tightens a
    # pre-existing file that may have looser permissions.
    fd = os.open(CREDENTIALS_ENV_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as fh:
        fh.write("\n".join(new_lines) + "\n")
    CREDENTIALS_ENV_PATH.chmod(0o600)
    logger.info("Credentials written to %s (mode 600)", CREDENTIALS_ENV_PATH)
|
||||
@@ -9,38 +9,19 @@ from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
|
||||
@dataclass
|
||||
@dataclass(slots=True)
|
||||
class SubscriptionData:
|
||||
"""Container for subscription data with metadata."""
|
||||
"""Container for subscription data with metadata.
|
||||
|
||||
Note: last_updated must be timezone-aware (use datetime.now(UTC)).
|
||||
"""
|
||||
|
||||
data: dict[str, Any]
|
||||
last_updated: datetime
|
||||
last_updated: datetime # Must be timezone-aware (UTC)
|
||||
subscription_type: str
|
||||
|
||||
|
||||
@dataclass
class SystemHealth:
    """Container for system health status information."""

    # Overall verdict for the snapshot.
    is_healthy: bool
    # Descriptions of detected issues.
    issues: list[str]
    # Descriptions of detected warnings (less severe than issues).
    warnings: list[str]
    # When this health snapshot was taken.
    last_checked: datetime
    # Per-component status strings, keyed by component name.
    component_status: dict[str, str]
|
||||
|
||||
|
||||
@dataclass
class APIResponse:
    """Container for standardized API response data."""

    # True when the call succeeded; error is expected to be None in that case.
    success: bool
    # Response payload, when the call produced one.
    data: dict[str, Any] | None = None
    # Error description, when the call failed.
    error: str | None = None
    # Optional extra context (timings, pagination, etc. — producer-defined).
    metadata: dict[str, Any] | None = None
|
||||
|
||||
|
||||
# Type aliases for common data structures
|
||||
ConfigValue = str | int | bool | float | None
|
||||
ConfigDict = dict[str, ConfigValue]
|
||||
GraphQLVariables = dict[str, Any]
|
||||
HealthStatus = dict[str, str | bool | int | list[Any]]
|
||||
def __post_init__(self) -> None:
|
||||
if self.last_updated.tzinfo is None:
|
||||
raise ValueError("last_updated must be timezone-aware; use datetime.now(UTC)")
|
||||
if not self.subscription_type.strip():
|
||||
raise ValueError("subscription_type must be a non-empty string")
|
||||
|
||||
97
unraid_mcp/core/utils.py
Normal file
97
unraid_mcp/core/utils.py
Normal file
@@ -0,0 +1,97 @@
|
||||
"""Shared utility functions for Unraid MCP tools."""
|
||||
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
|
||||
_MISSING: object = object()
|
||||
|
||||
|
||||
def safe_get(data: dict[str, Any], *keys: str, default: Any = None) -> Any:
    """Walk a chain of nested dict keys, tolerating gaps.

    Args:
        data: Root dictionary to walk.
        *keys: Keys to follow, in order.
        default: Returned when a key is absent or an intermediate value is
            not a dict.

    Returns:
        The value at the end of the chain — including an explicit ``None``
        stored under the final key — or ``default`` when the chain breaks.
        ``{"k": None}`` therefore yields ``None`` while ``{}`` yields
        ``default``.
    """
    # A local sentinel distinguishes "key absent" from a stored None value.
    absent = object()
    node: Any = data
    for step in keys:
        if not isinstance(node, dict):
            return default
        node = node.get(step, absent)
        if node is absent:
            return default
    return node
|
||||
|
||||
|
||||
def format_bytes(bytes_value: int | None) -> str:
|
||||
"""Format byte values into human-readable sizes.
|
||||
|
||||
Args:
|
||||
bytes_value: Number of bytes, or None.
|
||||
|
||||
Returns:
|
||||
Human-readable string like "1.00 GB" or "N/A" if input is None/invalid.
|
||||
"""
|
||||
if bytes_value is None:
|
||||
return "N/A"
|
||||
try:
|
||||
value = float(int(bytes_value))
|
||||
except (ValueError, TypeError):
|
||||
return "N/A"
|
||||
for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
|
||||
if value < 1024.0:
|
||||
return f"{value:.2f} {unit}"
|
||||
value /= 1024.0
|
||||
return f"{value:.2f} EB"
|
||||
|
||||
|
||||
def safe_display_url(url: str | None) -> str | None:
|
||||
"""Return a redacted URL showing only scheme + host + port.
|
||||
|
||||
Strips path, query parameters, credentials, and fragments to avoid
|
||||
leaking internal network topology or embedded secrets (CWE-200).
|
||||
"""
|
||||
if not url:
|
||||
return None
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
host = parsed.hostname or "unknown"
|
||||
if parsed.port:
|
||||
return f"{parsed.scheme}://{host}:{parsed.port}"
|
||||
return f"{parsed.scheme}://{host}"
|
||||
except ValueError:
|
||||
# urlparse raises ValueError for invalid URLs (e.g. contains control chars)
|
||||
return "<unparseable>"
|
||||
|
||||
|
||||
def format_kb(k: Any) -> str:
    """Render a kilobyte count as a human-readable size string.

    Args:
        k: Number of kilobytes, or None.

    Returns:
        A string such as "1.00 GB", or "N/A" for None / unparseable input.
    """
    if k is None:
        return "N/A"
    try:
        kb = int(k)
    except (TypeError, ValueError):
        return "N/A"

    # Binary thresholds relative to a kilobyte, largest first.
    for threshold, unit in ((1024**3, "TB"), (1024**2, "GB"), (1024, "MB")):
        if kb >= threshold:
            return f"{kb / threshold:.2f} {unit}"
    return f"{kb:.2f} KB"
|
||||
@@ -11,12 +11,19 @@ import sys
|
||||
|
||||
async def shutdown_cleanup() -> None:
    """Cleanup resources on server shutdown.

    Best-effort: each step is isolated in its own try block so a failure in
    one (e.g. stopping subscriptions) never prevents the other (closing the
    HTTP client). Errors are printed to stderr, keeping stdout clean — the
    duplicated stdout print in the previous version is removed.
    """
    try:
        from .subscriptions.manager import subscription_manager

        await subscription_manager.stop_all()
    except Exception as e:
        print(f"Error stopping subscriptions during cleanup: {e}", file=sys.stderr)

    try:
        from .core.client import close_http_client

        await close_http_client()
    except Exception as e:
        print(f"Error during cleanup: {e}", file=sys.stderr)
|
||||
|
||||
|
||||
def _run_shutdown_cleanup() -> None:
|
||||
|
||||
@@ -7,34 +7,79 @@ separate modules for configuration, core functionality, subscriptions, and tools
|
||||
import sys
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.server.middleware.caching import CallToolSettings, ResponseCachingMiddleware
|
||||
from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
|
||||
from fastmcp.server.middleware.logging import LoggingMiddleware
|
||||
from fastmcp.server.middleware.rate_limiting import SlidingWindowRateLimitingMiddleware
|
||||
from fastmcp.server.middleware.response_limiting import ResponseLimitingMiddleware
|
||||
|
||||
from .config.logging import logger
|
||||
from .config.settings import (
|
||||
UNRAID_API_KEY,
|
||||
UNRAID_API_URL,
|
||||
UNRAID_MCP_HOST,
|
||||
UNRAID_MCP_PORT,
|
||||
UNRAID_MCP_TRANSPORT,
|
||||
UNRAID_VERIFY_SSL,
|
||||
VERSION,
|
||||
validate_required_config,
|
||||
)
|
||||
from .subscriptions.diagnostics import register_diagnostic_tools
|
||||
from .subscriptions.resources import register_subscription_resources
|
||||
from .tools.array import register_array_tool
|
||||
from .tools.docker import register_docker_tool
|
||||
from .tools.health import register_health_tool
|
||||
from .tools.info import register_info_tool
|
||||
from .tools.keys import register_keys_tool
|
||||
from .tools.notifications import register_notifications_tool
|
||||
from .tools.rclone import register_rclone_tool
|
||||
from .tools.storage import register_storage_tool
|
||||
from .tools.users import register_users_tool
|
||||
from .tools.virtualization import register_vm_tool
|
||||
from .tools.unraid import register_unraid_tool
|
||||
|
||||
|
||||
# Middleware chain order matters — each layer wraps everything inside it:
# logging → error_handling → rate_limiter → response_limiter → cache → tool

# 1. Log every tools/call and resources/read: method, duration, errors.
# Outermost so it captures errors after they've been converted by error_handling.
_logging_middleware = LoggingMiddleware(
    logger=logger,
    methods=["tools/call", "resources/read"],
)

# 2. Catch any unhandled exceptions and convert to proper MCP errors.
# Tracks error_counts per (exception_type:method) for health diagnose.
error_middleware = ErrorHandlingMiddleware(
    logger=logger,
    include_traceback=True,
)

# 3. Unraid API rate limit: 100 requests per 10 seconds.
# Use a sliding window that stays comfortably under that cap.
# NOTE(review): max_requests=90 over window_minutes=1 allows 90/min, far below
# the ~600/min implied by 100 per 10 s — confirm the tighter budget is intended.
_rate_limiter = SlidingWindowRateLimitingMiddleware(max_requests=90, window_minutes=1)

# 4. Cap tool responses at 512 KB to protect the client context window.
# Oversized responses are truncated with a clear suffix rather than erroring.
_response_limiter = ResponseLimitingMiddleware(max_size=512_000)

# 5. Cache tool calls in-memory (MemoryStore default — no extra deps).
# Short 30 s TTL absorbs burst duplicate requests while keeping data fresh.
# Destructive calls won't hit the cache in practice (unique confirm=True + IDs).
cache_middleware = ResponseCachingMiddleware(
    call_tool_settings=CallToolSettings(
        ttl=30,
        included_tools=["unraid"],
    ),
    # Disable caching for list/resource/prompt — those are cheap.
    list_tools_settings={"enabled": False},
    list_resources_settings={"enabled": False},
    list_prompts_settings={"enabled": False},
    read_resource_settings={"enabled": False},
    get_prompt_settings={"enabled": False},
)

# Initialize FastMCP instance
mcp = FastMCP(
    name="Unraid MCP Server",
    instructions="Provides tools to interact with an Unraid server's GraphQL API.",
    version=VERSION,
    middleware=[
        _logging_middleware,
        error_middleware,
        _rate_limiter,
        _response_limiter,
        cache_middleware,
    ],
)

# Note: SubscriptionManager singleton is defined in subscriptions/manager.py
|
||||
@@ -44,27 +89,14 @@ mcp = FastMCP(
|
||||
def register_all_modules() -> None:
|
||||
"""Register all tools and resources with the MCP instance."""
|
||||
try:
|
||||
# Register subscription resources first
|
||||
# Register subscription resources and diagnostic tools
|
||||
register_subscription_resources(mcp)
|
||||
logger.info("Subscription resources registered")
|
||||
register_diagnostic_tools(mcp)
|
||||
logger.info("Subscription resources and diagnostic tools registered")
|
||||
|
||||
# Register all consolidated tools
|
||||
registrars = [
|
||||
register_info_tool,
|
||||
register_array_tool,
|
||||
register_storage_tool,
|
||||
register_docker_tool,
|
||||
register_vm_tool,
|
||||
register_notifications_tool,
|
||||
register_rclone_tool,
|
||||
register_users_tool,
|
||||
register_keys_tool,
|
||||
register_health_tool,
|
||||
]
|
||||
for registrar in registrars:
|
||||
registrar(mcp)
|
||||
|
||||
logger.info(f"All {len(registrars)} tools registered successfully - Server ready!")
|
||||
# Register the consolidated unraid tool
|
||||
register_unraid_tool(mcp)
|
||||
logger.info("unraid tool registered successfully - Server ready!")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to register modules: {e}", exc_info=True)
|
||||
@@ -73,20 +105,25 @@ def register_all_modules() -> None:
|
||||
|
||||
def run_server() -> None:
|
||||
"""Run the MCP server with the configured transport."""
|
||||
# Log configuration
|
||||
if UNRAID_API_URL:
|
||||
logger.info(f"UNRAID_API_URL loaded: {UNRAID_API_URL[:20]}...")
|
||||
else:
|
||||
logger.warning("UNRAID_API_URL not found in environment or .env file.")
|
||||
# Validate required configuration before anything else
|
||||
is_valid, missing = validate_required_config()
|
||||
if not is_valid:
|
||||
logger.warning(
|
||||
f"Missing configuration: {', '.join(missing)}. "
|
||||
"Server will prompt for credentials on first tool call via elicitation."
|
||||
)
|
||||
|
||||
if UNRAID_API_KEY:
|
||||
logger.info("UNRAID_API_KEY loaded: ****")
|
||||
else:
|
||||
logger.warning("UNRAID_API_KEY not found in environment or .env file.")
|
||||
# Log configuration (delegated to shared function)
|
||||
from .config.logging import log_configuration_status
|
||||
|
||||
logger.info(f"UNRAID_MCP_PORT set to: {UNRAID_MCP_PORT}")
|
||||
logger.info(f"UNRAID_MCP_HOST set to: {UNRAID_MCP_HOST}")
|
||||
logger.info(f"UNRAID_MCP_TRANSPORT set to: {UNRAID_MCP_TRANSPORT}")
|
||||
log_configuration_status(logger)
|
||||
|
||||
if UNRAID_VERIFY_SSL is False:
|
||||
logger.warning(
|
||||
"SSL VERIFICATION DISABLED (UNRAID_VERIFY_SSL=false). "
|
||||
"Connections to Unraid API are vulnerable to man-in-the-middle attacks. "
|
||||
"Only use this in trusted networks or for development."
|
||||
)
|
||||
|
||||
# Register all modules
|
||||
register_all_modules()
|
||||
|
||||
@@ -7,7 +7,8 @@ development and debugging purposes.
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime
|
||||
import re
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any
|
||||
|
||||
import websockets
|
||||
@@ -17,9 +18,66 @@ from websockets.typing import Subprotocol
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL
|
||||
from ..core.exceptions import ToolError
|
||||
from ..core.utils import safe_display_url
|
||||
from .manager import subscription_manager
|
||||
from .resources import ensure_subscriptions_started
|
||||
from .utils import build_ws_ssl_context
|
||||
from .utils import _analyze_subscription_status, build_ws_ssl_context, build_ws_url
|
||||
|
||||
|
||||
# Schema field names that appear inside the selection set of allowed subscriptions.
# The regex _SUBSCRIPTION_NAME_PATTERN extracts the first identifier after the
# opening "{", so we list the actual field names used in queries (e.g. "logFile"),
# NOT the operation-level names (e.g. "logFileSubscription").
_ALLOWED_SUBSCRIPTION_FIELDS = frozenset(
    {
        "logFile",
        "containerStats",
        "cpu",
        "memory",
        "array",
        "network",
        "docker",
        "vm",
    }
)

# Pattern: must start with "subscription" keyword, then extract the first selected
# field name (the word immediately after "{").
_SUBSCRIPTION_NAME_PATTERN = re.compile(r"^\s*subscription\b[^{]*\{\s*(\w+)", re.IGNORECASE)
# Reject any query that contains a bare "mutation" or "query" operation keyword.
# NOTE(review): this word-boundary match also rejects subscriptions that merely
# contain the word "query" in an alias or argument — deliberately conservative;
# revisit if a legitimate subscription ever needs those words.
_FORBIDDEN_KEYWORDS = re.compile(r"\b(mutation|query)\b", re.IGNORECASE)
|
||||
|
||||
|
||||
def _validate_subscription_query(query: str) -> str:
    """Validate that a subscription query is safe to execute.

    Only subscription operations whose first selected field is on the
    whitelist are accepted; anything containing a mutation/query keyword is
    rejected outright.

    Returns:
        The extracted field name (e.g. "logFile").

    Raises:
        ToolError: If the query fails validation.
    """
    # Hard reject first: no mutation or query operations, anywhere.
    if _FORBIDDEN_KEYWORDS.search(query) is not None:
        raise ToolError("Query rejected: must be a subscription, not a mutation or query.")

    name_match = _SUBSCRIPTION_NAME_PATTERN.match(query)
    if name_match is None:
        raise ToolError(
            "Query rejected: must start with 'subscription' and contain a valid "
            'subscription field. Example: subscription { logFile(path: "/var/log/syslog") { content } }'
        )

    selected_field = name_match.group(1)
    if selected_field in _ALLOWED_SUBSCRIPTION_FIELDS:
        return selected_field
    raise ToolError(
        f"Subscription field '{selected_field}' is not allowed. "
        f"Allowed fields: {sorted(_ALLOWED_SUBSCRIPTION_FIELDS)}"
    )
|
||||
|
||||
|
||||
def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
@@ -34,6 +92,8 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
"""Test a GraphQL subscription query directly to debug schema issues.
|
||||
|
||||
Use this to find working subscription field names and structure.
|
||||
Only whitelisted schema fields are permitted (logFile, containerStats,
|
||||
cpu, memory, array, network, docker, vm).
|
||||
|
||||
Args:
|
||||
subscription_query: The GraphQL subscription query to test
|
||||
@@ -41,16 +101,18 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
Returns:
|
||||
Dict containing test results and response data
|
||||
"""
|
||||
try:
|
||||
logger.info(f"[TEST_SUBSCRIPTION] Testing query: {subscription_query}")
|
||||
field_name = _validate_subscription_query(subscription_query)
|
||||
|
||||
# Build WebSocket URL
|
||||
if not UNRAID_API_URL:
|
||||
raise ToolError("UNRAID_API_URL is not configured")
|
||||
ws_url = (
|
||||
UNRAID_API_URL.replace("https://", "wss://").replace("http://", "ws://")
|
||||
+ "/graphql"
|
||||
)
|
||||
try:
|
||||
logger.info(f"[TEST_SUBSCRIPTION] Testing validated subscription field '{field_name}'")
|
||||
|
||||
try:
|
||||
ws_url = build_ws_url()
|
||||
except ValueError as e:
|
||||
logger.error("[TEST_SUBSCRIPTION] Invalid WebSocket URL configuration: %s", e)
|
||||
raise ToolError(
|
||||
"Subscription test failed: invalid WebSocket URL configuration."
|
||||
) from e
|
||||
|
||||
ssl_context = build_ws_ssl_context(ws_url)
|
||||
|
||||
@@ -59,6 +121,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
ws_url,
|
||||
subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
|
||||
ssl=ssl_context,
|
||||
open_timeout=10,
|
||||
ping_interval=30,
|
||||
ping_timeout=10,
|
||||
) as websocket:
|
||||
@@ -67,7 +130,7 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
json.dumps(
|
||||
{
|
||||
"type": "connection_init",
|
||||
"payload": {"headers": {"X-API-Key": UNRAID_API_KEY}},
|
||||
"payload": {"x-api-key": UNRAID_API_KEY},
|
||||
}
|
||||
)
|
||||
)
|
||||
@@ -77,7 +140,13 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
init_response = json.loads(response)
|
||||
|
||||
if init_response.get("type") != "connection_ack":
|
||||
return {"error": f"Connection failed: {init_response}"}
|
||||
logger.error(
|
||||
"[TEST_SUBSCRIPTION] Connection not acknowledged: %s",
|
||||
init_response,
|
||||
)
|
||||
raise ToolError(
|
||||
"Subscription test failed: WebSocket connection was not acknowledged."
|
||||
)
|
||||
|
||||
# Send subscription
|
||||
await websocket.send(
|
||||
@@ -102,9 +171,13 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
"note": "Connection successful, subscription may be waiting for events",
|
||||
}
|
||||
|
||||
except ToolError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[TEST_SUBSCRIPTION] Error: {e}", exc_info=True)
|
||||
return {"error": str(e), "query_tested": subscription_query}
|
||||
logger.error("[TEST_SUBSCRIPTION] Error: %s", e, exc_info=True)
|
||||
raise ToolError(
|
||||
"Subscription test failed: an unexpected error occurred. Check server logs for details."
|
||||
) from e
|
||||
|
||||
@mcp.tool()
|
||||
async def diagnose_subscriptions() -> dict[str, Any]:
|
||||
@@ -122,20 +195,29 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
logger.info("[DIAGNOSTIC] Running subscription diagnostics...")
|
||||
|
||||
# Get comprehensive status
|
||||
status = subscription_manager.get_subscription_status()
|
||||
status = await subscription_manager.get_subscription_status()
|
||||
|
||||
# Initialize connection issues list with proper type
|
||||
connection_issues: list[dict[str, Any]] = []
|
||||
# Analyze connection issues and error counts via shared helper.
|
||||
# Gates connection_issues on current failure state (Bug 5 fix).
|
||||
error_count, connection_issues = _analyze_subscription_status(status)
|
||||
|
||||
# Calculate WebSocket URL
|
||||
ws_url_display: str | None = None
|
||||
if UNRAID_API_URL:
|
||||
try:
|
||||
ws_url_display = build_ws_url()
|
||||
except ValueError:
|
||||
ws_url_display = None
|
||||
|
||||
# Add environment info with explicit typing
|
||||
diagnostic_info: dict[str, Any] = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"timestamp": datetime.now(UTC).isoformat(),
|
||||
"environment": {
|
||||
"auto_start_enabled": subscription_manager.auto_start_enabled,
|
||||
"max_reconnect_attempts": subscription_manager.max_reconnect_attempts,
|
||||
"unraid_api_url": UNRAID_API_URL[:50] + "..." if UNRAID_API_URL else None,
|
||||
"unraid_api_url": safe_display_url(UNRAID_API_URL),
|
||||
"api_key_configured": bool(UNRAID_API_KEY),
|
||||
"websocket_url": None,
|
||||
"websocket_url": ws_url_display,
|
||||
},
|
||||
"subscriptions": status,
|
||||
"summary": {
|
||||
@@ -147,40 +229,11 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
),
|
||||
"active_count": len(subscription_manager.active_subscriptions),
|
||||
"with_data": len(subscription_manager.resource_data),
|
||||
"in_error_state": 0,
|
||||
"in_error_state": error_count,
|
||||
"connection_issues": connection_issues,
|
||||
},
|
||||
}
|
||||
|
||||
# Calculate WebSocket URL
|
||||
if UNRAID_API_URL:
|
||||
if UNRAID_API_URL.startswith("https://"):
|
||||
ws_url = "wss://" + UNRAID_API_URL[len("https://") :]
|
||||
elif UNRAID_API_URL.startswith("http://"):
|
||||
ws_url = "ws://" + UNRAID_API_URL[len("http://") :]
|
||||
else:
|
||||
ws_url = UNRAID_API_URL
|
||||
if not ws_url.endswith("/graphql"):
|
||||
ws_url = ws_url.rstrip("/") + "/graphql"
|
||||
diagnostic_info["environment"]["websocket_url"] = ws_url
|
||||
|
||||
# Analyze issues
|
||||
for sub_name, sub_status in status.items():
|
||||
runtime = sub_status.get("runtime", {})
|
||||
connection_state = runtime.get("connection_state", "unknown")
|
||||
|
||||
if connection_state in ["error", "auth_failed", "timeout", "max_retries_exceeded"]:
|
||||
diagnostic_info["summary"]["in_error_state"] += 1
|
||||
|
||||
if runtime.get("last_error"):
|
||||
connection_issues.append(
|
||||
{
|
||||
"subscription": sub_name,
|
||||
"state": connection_state,
|
||||
"error": runtime["last_error"],
|
||||
}
|
||||
)
|
||||
|
||||
# Add troubleshooting recommendations
|
||||
recommendations: list[str] = []
|
||||
|
||||
@@ -227,7 +280,9 @@ def register_diagnostic_tools(mcp: FastMCP) -> None:
|
||||
return diagnostic_info
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[DIAGNOSTIC] Failed to generate diagnostics: {e}")
|
||||
raise ToolError(f"Failed to generate diagnostics: {e!s}") from e
|
||||
logger.error("[DIAGNOSTIC] Failed to generate diagnostics: %s", e, exc_info=True)
|
||||
raise ToolError(
|
||||
"Failed to generate diagnostics: an unexpected error occurred. Check server logs for details."
|
||||
) from e
|
||||
|
||||
logger.info("Subscription diagnostic tools registered successfully")
|
||||
|
||||
@@ -8,16 +8,71 @@ error handling, reconnection logic, and authentication.
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
import time
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any
|
||||
|
||||
import websockets
|
||||
from websockets.typing import Subprotocol
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY, UNRAID_API_URL
|
||||
from ..config.settings import UNRAID_API_KEY
|
||||
from ..core.client import redact_sensitive
|
||||
from ..core.types import SubscriptionData
|
||||
from .utils import build_ws_ssl_context
|
||||
from .utils import build_ws_ssl_context, build_ws_url
|
||||
|
||||
|
||||
# Resource data size limits to prevent unbounded memory growth
_MAX_RESOURCE_DATA_BYTES = 1_048_576  # 1MB
_MAX_RESOURCE_DATA_LINES = 5_000
# Minimum stable connection duration (seconds) before resetting reconnect counter
_STABLE_CONNECTION_SECONDS = 30


def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]:
    """Cap log content in subscription data to prevent unbounded memory growth.

    Returns a new dict — does NOT mutate the input. If any nested 'content'
    field (from log subscriptions) exceeds the byte limit, truncate it to the
    most recent _MAX_RESOURCE_DATA_LINES lines.

    The final content is guaranteed to be <= _MAX_RESOURCE_DATA_BYTES.

    Note: only dict values are recursed into; a 'content' string nested
    inside a list value is not capped by this pass.
    """
    result: dict[str, Any] = {}
    for key, value in data.items():
        if isinstance(value, dict):
            # Recurse so the cap applies at any nesting depth.
            result[key] = _cap_log_content(value)
        elif (
            key == "content"
            and isinstance(value, str)
            # Pre-check uses byte count so multibyte UTF-8 chars cannot bypass the cap
            and len(value.encode("utf-8", errors="replace")) > _MAX_RESOURCE_DATA_BYTES
        ):
            lines = value.splitlines()
            original_line_count = len(lines)

            # Keep most recent lines first.
            if len(lines) > _MAX_RESOURCE_DATA_LINES:
                lines = lines[-_MAX_RESOURCE_DATA_LINES:]

            truncated = "\n".join(lines)
            # Encode once and slice bytes instead of an O(n²) line-trim loop
            encoded = truncated.encode("utf-8", errors="replace")
            if len(encoded) > _MAX_RESOURCE_DATA_BYTES:
                truncated = encoded[-_MAX_RESOURCE_DATA_BYTES:].decode("utf-8", errors="ignore")
                # Strip partial first line that may have been cut mid-character
                nl_pos = truncated.find("\n")
                if nl_pos != -1:
                    truncated = truncated[nl_pos + 1 :]

            logger.warning(
                f"[RESOURCE] Capped log content from {original_line_count} to "
                f"{len(lines)} lines ({len(value)} -> {len(truncated)} chars)"
            )
            result[key] = truncated
        else:
            result[key] = value
    return result
|
||||
|
||||
|
||||
class SubscriptionManager:
|
||||
@@ -26,8 +81,13 @@ class SubscriptionManager:
|
||||
def __init__(self) -> None:
|
||||
self.active_subscriptions: dict[str, asyncio.Task[None]] = {}
|
||||
self.resource_data: dict[str, SubscriptionData] = {}
|
||||
self.websocket: websockets.WebSocketServerProtocol | None = None
|
||||
self.subscription_lock = asyncio.Lock()
|
||||
# Two fine-grained locks instead of one coarse lock (P-01):
|
||||
# _task_lock guards active_subscriptions dict (task lifecycle).
|
||||
# _data_lock guards resource_data dict (WebSocket message writes + reads).
|
||||
# Splitting prevents WebSocket message updates from blocking tool reads
|
||||
# of active_subscriptions and vice versa.
|
||||
self._task_lock = asyncio.Lock()
|
||||
self._data_lock = asyncio.Lock()
|
||||
|
||||
# Configuration
|
||||
self.auto_start_enabled = (
|
||||
@@ -37,11 +97,22 @@ class SubscriptionManager:
|
||||
self.max_reconnect_attempts = int(os.getenv("UNRAID_MAX_RECONNECT_ATTEMPTS", "10"))
|
||||
self.connection_states: dict[str, str] = {} # Track connection state per subscription
|
||||
self.last_error: dict[str, str] = {} # Track last error per subscription
|
||||
self._connection_start_times: dict[str, float] = {} # Track when connections started
|
||||
|
||||
# Define subscription configurations
|
||||
self.subscription_configs = {
|
||||
"logFileSubscription": {
|
||||
"query": """
|
||||
from .queries import SNAPSHOT_ACTIONS
|
||||
|
||||
self.subscription_configs: dict[str, dict] = {
|
||||
action: {
|
||||
"query": query,
|
||||
"resource": f"unraid://live/{action}",
|
||||
"description": f"Real-time {action.replace('_', ' ')} data",
|
||||
"auto_start": True,
|
||||
}
|
||||
for action, query in SNAPSHOT_ACTIONS.items()
|
||||
}
|
||||
self.subscription_configs["logFileSubscription"] = {
|
||||
"query": """
|
||||
subscription LogFileSubscription($path: String!) {
|
||||
logFile(path: $path) {
|
||||
path
|
||||
@@ -50,10 +121,9 @@ class SubscriptionManager:
|
||||
}
|
||||
}
|
||||
""",
|
||||
"resource": "unraid://logs/stream",
|
||||
"description": "Real-time log file streaming",
|
||||
"auto_start": False, # Started manually with path parameter
|
||||
}
|
||||
"resource": "unraid://logs/stream",
|
||||
"description": "Real-time log file streaming",
|
||||
"auto_start": False, # Started manually with path parameter
|
||||
}
|
||||
|
||||
logger.info(
|
||||
@@ -105,8 +175,9 @@ class SubscriptionManager:
|
||||
# Reset connection tracking
|
||||
self.reconnect_attempts[subscription_name] = 0
|
||||
self.connection_states[subscription_name] = "starting"
|
||||
self._connection_start_times.pop(subscription_name, None)
|
||||
|
||||
async with self.subscription_lock:
|
||||
async with self._task_lock:
|
||||
try:
|
||||
task = asyncio.create_task(
|
||||
self._subscription_loop(subscription_name, query, variables or {})
|
||||
@@ -128,7 +199,7 @@ class SubscriptionManager:
|
||||
"""Stop a specific subscription."""
|
||||
logger.info(f"[SUBSCRIPTION:{subscription_name}] Stopping subscription...")
|
||||
|
||||
async with self.subscription_lock:
|
||||
async with self._task_lock:
|
||||
if subscription_name in self.active_subscriptions:
|
||||
task = self.active_subscriptions[subscription_name]
|
||||
task.cancel()
|
||||
@@ -138,10 +209,21 @@ class SubscriptionManager:
|
||||
logger.debug(f"[SUBSCRIPTION:{subscription_name}] Task cancelled successfully")
|
||||
del self.active_subscriptions[subscription_name]
|
||||
self.connection_states[subscription_name] = "stopped"
|
||||
self._connection_start_times.pop(subscription_name, None)
|
||||
logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription stopped")
|
||||
else:
|
||||
logger.warning(f"[SUBSCRIPTION:{subscription_name}] No active subscription to stop")
|
||||
|
||||
async def stop_all(self) -> None:
|
||||
"""Stop all active subscriptions (called during server shutdown)."""
|
||||
subscription_names = list(self.active_subscriptions.keys())
|
||||
for name in subscription_names:
|
||||
try:
|
||||
await self.stop_subscription(name)
|
||||
except Exception as e:
|
||||
logger.error(f"[SHUTDOWN] Error stopping subscription '{name}': {e}", exc_info=True)
|
||||
logger.info(f"[SHUTDOWN] Stopped {len(subscription_names)} subscription(s)")
|
||||
|
||||
async def _subscription_loop(
|
||||
self, subscription_name: str, query: str, variables: dict[str, Any] | None
|
||||
) -> None:
|
||||
@@ -165,20 +247,7 @@ class SubscriptionManager:
|
||||
break
|
||||
|
||||
try:
|
||||
# Build WebSocket URL with detailed logging
|
||||
if not UNRAID_API_URL:
|
||||
raise ValueError("UNRAID_API_URL is not configured")
|
||||
|
||||
if UNRAID_API_URL.startswith("https://"):
|
||||
ws_url = "wss://" + UNRAID_API_URL[len("https://") :]
|
||||
elif UNRAID_API_URL.startswith("http://"):
|
||||
ws_url = "ws://" + UNRAID_API_URL[len("http://") :]
|
||||
else:
|
||||
ws_url = UNRAID_API_URL
|
||||
|
||||
if not ws_url.endswith("/graphql"):
|
||||
ws_url = ws_url.rstrip("/") + "/graphql"
|
||||
|
||||
ws_url = build_ws_url()
|
||||
logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}")
|
||||
logger.debug(
|
||||
f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}"
|
||||
@@ -195,6 +264,7 @@ class SubscriptionManager:
|
||||
async with websockets.connect(
|
||||
ws_url,
|
||||
subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
|
||||
open_timeout=connect_timeout,
|
||||
ping_interval=20,
|
||||
ping_timeout=10,
|
||||
close_timeout=10,
|
||||
@@ -206,9 +276,9 @@ class SubscriptionManager:
|
||||
)
|
||||
self.connection_states[subscription_name] = "connected"
|
||||
|
||||
# Reset retry count on successful connection
|
||||
self.reconnect_attempts[subscription_name] = 0
|
||||
retry_delay = 5 # Reset delay
|
||||
# Track connection start time — only reset retry counter
|
||||
# after the connection proves stable (>30s connected)
|
||||
self._connection_start_times[subscription_name] = time.monotonic()
|
||||
|
||||
# Initialize GraphQL-WS protocol
|
||||
logger.debug(
|
||||
@@ -219,9 +289,8 @@ class SubscriptionManager:
|
||||
|
||||
if UNRAID_API_KEY:
|
||||
logger.debug(f"[AUTH:{subscription_name}] Adding authentication payload")
|
||||
# Use standard X-API-Key header format (matching HTTP client)
|
||||
auth_payload = {"headers": {"X-API-Key": UNRAID_API_KEY}}
|
||||
init_payload["payload"] = auth_payload
|
||||
# Use graphql-ws connectionParams format (direct key, not nested headers)
|
||||
init_payload["payload"] = {"x-api-key": UNRAID_API_KEY}
|
||||
else:
|
||||
logger.warning(
|
||||
f"[AUTH:{subscription_name}] No API key available for authentication"
|
||||
@@ -290,7 +359,9 @@ class SubscriptionManager:
|
||||
f"[SUBSCRIPTION:{subscription_name}] Subscription message type: {start_type}"
|
||||
)
|
||||
logger.debug(f"[SUBSCRIPTION:{subscription_name}] Query: {query[:100]}...")
|
||||
logger.debug(f"[SUBSCRIPTION:{subscription_name}] Variables: {variables}")
|
||||
logger.debug(
|
||||
f"[SUBSCRIPTION:{subscription_name}] Variables: {redact_sensitive(variables)}"
|
||||
)
|
||||
|
||||
await websocket.send(json.dumps(subscription_message))
|
||||
logger.info(
|
||||
@@ -326,11 +397,18 @@ class SubscriptionManager:
|
||||
logger.info(
|
||||
f"[DATA:{subscription_name}] Received subscription data update"
|
||||
)
|
||||
self.resource_data[subscription_name] = SubscriptionData(
|
||||
data=payload["data"],
|
||||
last_updated=datetime.now(),
|
||||
capped_data = (
|
||||
_cap_log_content(payload["data"])
|
||||
if isinstance(payload["data"], dict)
|
||||
else payload["data"]
|
||||
)
|
||||
new_entry = SubscriptionData(
|
||||
data=capped_data,
|
||||
last_updated=datetime.now(UTC),
|
||||
subscription_type=subscription_name,
|
||||
)
|
||||
async with self._data_lock:
|
||||
self.resource_data[subscription_name] = new_entry
|
||||
logger.debug(
|
||||
f"[RESOURCE:{subscription_name}] Resource data updated successfully"
|
||||
)
|
||||
@@ -391,7 +469,8 @@ class SubscriptionManager:
|
||||
logger.error(f"[PROTOCOL:{subscription_name}] JSON decode error: {e}")
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[DATA:{subscription_name}] Error processing message: {e}"
|
||||
f"[DATA:{subscription_name}] Error processing message: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
msg_preview = (
|
||||
message[:200]
|
||||
@@ -421,29 +500,70 @@ class SubscriptionManager:
|
||||
self.connection_states[subscription_name] = "invalid_uri"
|
||||
break # Don't retry on invalid URI
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Unexpected error: {e}"
|
||||
except ValueError as e:
|
||||
# Non-retryable configuration error (e.g. UNRAID_API_URL not set)
|
||||
error_msg = f"Configuration error: {e}"
|
||||
logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}")
|
||||
self.last_error[subscription_name] = error_msg
|
||||
self.connection_states[subscription_name] = "error"
|
||||
break # Don't retry on configuration errors
|
||||
|
||||
# Calculate backoff delay
|
||||
retry_delay = min(retry_delay * 1.5, max_retry_delay)
|
||||
except Exception as e:
|
||||
error_msg = f"Unexpected error: {e}"
|
||||
logger.error(f"[WEBSOCKET:{subscription_name}] {error_msg}", exc_info=True)
|
||||
self.last_error[subscription_name] = error_msg
|
||||
self.connection_states[subscription_name] = "error"
|
||||
|
||||
# Check if connection was stable before deciding on retry behavior
|
||||
start_time = self._connection_start_times.pop(subscription_name, None)
|
||||
if start_time is not None:
|
||||
connected_duration = time.monotonic() - start_time
|
||||
if connected_duration >= _STABLE_CONNECTION_SECONDS:
|
||||
# Connection was stable — reset retry counter and backoff
|
||||
logger.info(
|
||||
f"[WEBSOCKET:{subscription_name}] Connection was stable "
|
||||
f"({connected_duration:.0f}s >= {_STABLE_CONNECTION_SECONDS}s), "
|
||||
f"resetting retry counter"
|
||||
)
|
||||
self.reconnect_attempts[subscription_name] = 0
|
||||
retry_delay = 5
|
||||
else:
|
||||
logger.warning(
|
||||
f"[WEBSOCKET:{subscription_name}] Connection was unstable "
|
||||
f"({connected_duration:.0f}s < {_STABLE_CONNECTION_SECONDS}s), "
|
||||
f"keeping retry counter at {self.reconnect_attempts.get(subscription_name, 0)}"
|
||||
)
|
||||
# Only escalate backoff when connection was NOT stable
|
||||
retry_delay = min(retry_delay * 1.5, max_retry_delay)
|
||||
else:
|
||||
# No connection was established — escalate backoff
|
||||
retry_delay = min(retry_delay * 1.5, max_retry_delay)
|
||||
logger.info(
|
||||
f"[WEBSOCKET:{subscription_name}] Reconnecting in {retry_delay:.1f} seconds..."
|
||||
)
|
||||
self.connection_states[subscription_name] = "reconnecting"
|
||||
await asyncio.sleep(retry_delay)
|
||||
|
||||
def get_resource_data(self, resource_name: str) -> dict[str, Any] | None:
|
||||
# The while loop exited (via break or max_retries exceeded).
|
||||
# Remove from active_subscriptions so start_subscription() can restart it.
|
||||
async with self._task_lock:
|
||||
self.active_subscriptions.pop(subscription_name, None)
|
||||
logger.info(
|
||||
f"[SUBSCRIPTION:{subscription_name}] Subscription loop ended — "
|
||||
f"removed from active_subscriptions. Final state: "
|
||||
f"{self.connection_states.get(subscription_name, 'unknown')}"
|
||||
)
|
||||
|
||||
async def get_resource_data(self, resource_name: str) -> dict[str, Any] | None:
|
||||
"""Get current resource data with enhanced logging."""
|
||||
logger.debug(f"[RESOURCE:{resource_name}] Resource data requested")
|
||||
|
||||
if resource_name in self.resource_data:
|
||||
data = self.resource_data[resource_name]
|
||||
age_seconds = (datetime.now() - data.last_updated).total_seconds()
|
||||
logger.debug(f"[RESOURCE:{resource_name}] Data found, age: {age_seconds:.1f}s")
|
||||
return data.data
|
||||
async with self._data_lock:
|
||||
if resource_name in self.resource_data:
|
||||
data = self.resource_data[resource_name]
|
||||
age_seconds = (datetime.now(UTC) - data.last_updated).total_seconds()
|
||||
logger.debug(f"[RESOURCE:{resource_name}] Data found, age: {age_seconds:.1f}s")
|
||||
return data.data
|
||||
logger.debug(f"[RESOURCE:{resource_name}] No data available")
|
||||
return None
|
||||
|
||||
@@ -453,38 +573,39 @@ class SubscriptionManager:
|
||||
logger.debug(f"[SUBSCRIPTION_MANAGER] Active subscriptions: {active}")
|
||||
return active
|
||||
|
||||
def get_subscription_status(self) -> dict[str, dict[str, Any]]:
|
||||
async def get_subscription_status(self) -> dict[str, dict[str, Any]]:
|
||||
"""Get detailed status of all subscriptions for diagnostics."""
|
||||
status = {}
|
||||
|
||||
for sub_name, config in self.subscription_configs.items():
|
||||
sub_status = {
|
||||
"config": {
|
||||
"resource": config["resource"],
|
||||
"description": config["description"],
|
||||
"auto_start": config.get("auto_start", False),
|
||||
},
|
||||
"runtime": {
|
||||
"active": sub_name in self.active_subscriptions,
|
||||
"connection_state": self.connection_states.get(sub_name, "not_started"),
|
||||
"reconnect_attempts": self.reconnect_attempts.get(sub_name, 0),
|
||||
"last_error": self.last_error.get(sub_name, None),
|
||||
},
|
||||
}
|
||||
|
||||
# Add data info if available
|
||||
if sub_name in self.resource_data:
|
||||
data_info = self.resource_data[sub_name]
|
||||
age_seconds = (datetime.now() - data_info.last_updated).total_seconds()
|
||||
sub_status["data"] = {
|
||||
"available": True,
|
||||
"last_updated": data_info.last_updated.isoformat(),
|
||||
"age_seconds": age_seconds,
|
||||
async with self._task_lock, self._data_lock:
|
||||
for sub_name, config in self.subscription_configs.items():
|
||||
sub_status = {
|
||||
"config": {
|
||||
"resource": config["resource"],
|
||||
"description": config["description"],
|
||||
"auto_start": config.get("auto_start", False),
|
||||
},
|
||||
"runtime": {
|
||||
"active": sub_name in self.active_subscriptions,
|
||||
"connection_state": self.connection_states.get(sub_name, "not_started"),
|
||||
"reconnect_attempts": self.reconnect_attempts.get(sub_name, 0),
|
||||
"last_error": self.last_error.get(sub_name, None),
|
||||
},
|
||||
}
|
||||
else:
|
||||
sub_status["data"] = {"available": False}
|
||||
|
||||
status[sub_name] = sub_status
|
||||
# Add data info if available
|
||||
if sub_name in self.resource_data:
|
||||
data_info = self.resource_data[sub_name]
|
||||
age_seconds = (datetime.now(UTC) - data_info.last_updated).total_seconds()
|
||||
sub_status["data"] = {
|
||||
"available": True,
|
||||
"last_updated": data_info.last_updated.isoformat(),
|
||||
"age_seconds": age_seconds,
|
||||
}
|
||||
else:
|
||||
sub_status["data"] = {"available": False}
|
||||
|
||||
status[sub_name] = sub_status
|
||||
|
||||
logger.debug(f"[SUBSCRIPTION_MANAGER] Generated status for {len(status)} subscriptions")
|
||||
return status
|
||||
|
||||
52
unraid_mcp/subscriptions/queries.py
Normal file
52
unraid_mcp/subscriptions/queries.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""GraphQL subscription query strings for snapshot and collect operations."""
|
||||
|
||||
# Subscriptions that only emit on state changes (not on a regular interval).
|
||||
# When subscribe_once times out for these, it means no recent change — not an error.
|
||||
EVENT_DRIVEN_ACTIONS: frozenset[str] = frozenset(
|
||||
{
|
||||
"parity_progress",
|
||||
"ups_status",
|
||||
"notifications_overview",
|
||||
"owner",
|
||||
"server_status",
|
||||
}
|
||||
)
|
||||
|
||||
SNAPSHOT_ACTIONS = {
|
||||
"cpu": """
|
||||
subscription { systemMetricsCpu { id percentTotal cpus { percentTotal percentUser percentSystem percentIdle } } }
|
||||
""",
|
||||
"memory": """
|
||||
subscription { systemMetricsMemory { id total used free available active buffcache percentTotal swapTotal swapUsed swapFree percentSwapTotal } }
|
||||
""",
|
||||
"cpu_telemetry": """
|
||||
subscription { systemMetricsCpuTelemetry { id totalPower power temp } }
|
||||
""",
|
||||
"array_state": """
|
||||
subscription { arraySubscription { id state capacity { kilobytes { free used total } } parityCheckStatus { status progress speed errors } } }
|
||||
""",
|
||||
"parity_progress": """
|
||||
subscription { parityHistorySubscription { date status progress speed errors correcting paused running } }
|
||||
""",
|
||||
"ups_status": """
|
||||
subscription { upsUpdates { id name model status battery { chargeLevel estimatedRuntime health } power { inputVoltage outputVoltage loadPercentage } } }
|
||||
""",
|
||||
"notifications_overview": """
|
||||
subscription { notificationsOverview { unread { info warning alert total } archive { info warning alert total } } }
|
||||
""",
|
||||
"owner": """
|
||||
subscription { ownerSubscription { username url avatar } }
|
||||
""",
|
||||
"server_status": """
|
||||
subscription { serversSubscription { id name status guid wanip lanip localurl remoteurl } }
|
||||
""",
|
||||
}
|
||||
|
||||
COLLECT_ACTIONS = {
|
||||
"notification_feed": """
|
||||
subscription { notificationAdded { id title subject description importance type timestamp } }
|
||||
""",
|
||||
"log_tail": """
|
||||
subscription LogTail($path: String!) { logFile(path: $path) { path content totalLines startLine } }
|
||||
""",
|
||||
}
|
||||
@@ -4,34 +4,45 @@ This module defines MCP resources that bridge between the subscription manager
|
||||
and the MCP protocol, providing fallback queries when subscription data is unavailable.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
from typing import Final
|
||||
|
||||
import anyio
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from ..config.logging import logger
|
||||
from .manager import subscription_manager
|
||||
from .queries import SNAPSHOT_ACTIONS
|
||||
from .snapshot import subscribe_once
|
||||
|
||||
|
||||
# Global flag to track subscription startup
|
||||
_subscriptions_started = False
|
||||
_startup_lock: Final[asyncio.Lock] = asyncio.Lock()
|
||||
|
||||
|
||||
async def ensure_subscriptions_started() -> None:
|
||||
"""Ensure subscriptions are started, called from async context."""
|
||||
global _subscriptions_started
|
||||
|
||||
# Fast-path: skip lock if already started
|
||||
if _subscriptions_started:
|
||||
return
|
||||
|
||||
logger.info("[STARTUP] First async operation detected, starting subscriptions...")
|
||||
try:
|
||||
await autostart_subscriptions()
|
||||
_subscriptions_started = True
|
||||
logger.info("[STARTUP] Subscriptions started successfully")
|
||||
except Exception as e:
|
||||
logger.error(f"[STARTUP] Failed to start subscriptions: {e}", exc_info=True)
|
||||
# Slow-path: acquire lock for initialization (double-checked locking)
|
||||
async with _startup_lock:
|
||||
if _subscriptions_started:
|
||||
return
|
||||
|
||||
logger.info("[STARTUP] First async operation detected, starting subscriptions...")
|
||||
try:
|
||||
await autostart_subscriptions()
|
||||
_subscriptions_started = True
|
||||
logger.info("[STARTUP] Subscriptions started successfully")
|
||||
except Exception as e:
|
||||
logger.error(f"[STARTUP] Failed to start subscriptions: {e}", exc_info=True)
|
||||
|
||||
|
||||
async def autostart_subscriptions() -> None:
|
||||
@@ -39,11 +50,12 @@ async def autostart_subscriptions() -> None:
|
||||
logger.info("[AUTOSTART] Initiating subscription auto-start process...")
|
||||
|
||||
try:
|
||||
# Use the new SubscriptionManager auto-start method
|
||||
# Use the SubscriptionManager auto-start method
|
||||
await subscription_manager.auto_start_all_subscriptions()
|
||||
logger.info("[AUTOSTART] Auto-start process completed successfully")
|
||||
except Exception as e:
|
||||
logger.error(f"[AUTOSTART] Failed during auto-start process: {e}", exc_info=True)
|
||||
raise # Propagate so ensure_subscriptions_started doesn't mark as started
|
||||
|
||||
# Optional log file subscription
|
||||
log_path = os.getenv("UNRAID_AUTOSTART_LOG_PATH")
|
||||
@@ -82,8 +94,8 @@ def register_subscription_resources(mcp: FastMCP) -> None:
|
||||
async def logs_stream_resource() -> str:
|
||||
"""Real-time log stream data from subscription."""
|
||||
await ensure_subscriptions_started()
|
||||
data = subscription_manager.get_resource_data("logFileSubscription")
|
||||
if data:
|
||||
data = await subscription_manager.get_resource_data("logFileSubscription")
|
||||
if data is not None:
|
||||
return json.dumps(data, indent=2)
|
||||
return json.dumps(
|
||||
{
|
||||
@@ -92,4 +104,45 @@ def register_subscription_resources(mcp: FastMCP) -> None:
|
||||
}
|
||||
)
|
||||
|
||||
def _make_resource_fn(action: str):
|
||||
async def _live_resource() -> str:
|
||||
await ensure_subscriptions_started()
|
||||
data = await subscription_manager.get_resource_data(action)
|
||||
if data is not None:
|
||||
return json.dumps(data, indent=2)
|
||||
# Surface permanent errors instead of reporting "connecting" indefinitely
|
||||
last_error = subscription_manager.last_error.get(action)
|
||||
if last_error:
|
||||
return json.dumps(
|
||||
{
|
||||
"status": "error",
|
||||
"message": f"Subscription '{action}' failed: {last_error}",
|
||||
}
|
||||
)
|
||||
# When auto-start is disabled, fall back to a one-shot fetch so the
|
||||
# resource returns real data instead of a perpetual "connecting" placeholder.
|
||||
if not subscription_manager.auto_start_enabled:
|
||||
try:
|
||||
query_info = SNAPSHOT_ACTIONS.get(action)
|
||||
if query_info is not None:
|
||||
fallback_data = await subscribe_once(query_info)
|
||||
return json.dumps(fallback_data, indent=2)
|
||||
except Exception as e:
|
||||
logger.warning("[RESOURCE] On-demand fallback for '%s' failed: %s", action, e)
|
||||
return json.dumps(
|
||||
{
|
||||
"status": "connecting",
|
||||
"message": f"Subscription '{action}' is starting. Retry in a moment.",
|
||||
}
|
||||
)
|
||||
|
||||
_live_resource.__name__ = f"{action}_resource"
|
||||
_live_resource.__doc__ = (
|
||||
f"Real-time {action.replace('_', ' ')} data via WebSocket subscription."
|
||||
)
|
||||
return _live_resource
|
||||
|
||||
for _action in SNAPSHOT_ACTIONS:
|
||||
mcp.resource(f"unraid://live/{_action}")(_make_resource_fn(_action))
|
||||
|
||||
logger.info("Subscription resources registered successfully")
|
||||
|
||||
171
unraid_mcp/subscriptions/snapshot.py
Normal file
171
unraid_mcp/subscriptions/snapshot.py
Normal file
@@ -0,0 +1,171 @@
|
||||
"""One-shot GraphQL subscription helpers for MCP tool snapshot actions.
|
||||
|
||||
`subscribe_once(query, variables, timeout)` — connect, subscribe, return the
|
||||
first event's data, then disconnect.
|
||||
|
||||
`subscribe_collect(query, variables, collect_for, timeout)` — connect,
|
||||
subscribe, collect all events for `collect_for` seconds, return the list.
|
||||
|
||||
Neither function maintains a persistent connection — they open and close a
|
||||
WebSocket per call. This is intentional: MCP tools are request-response.
|
||||
Use the SubscriptionManager for long-lived monitoring resources.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
import websockets
|
||||
from websockets.typing import Subprotocol
|
||||
|
||||
from ..config.logging import logger
|
||||
from ..config.settings import UNRAID_API_KEY
|
||||
from ..core.exceptions import ToolError
|
||||
from .utils import build_ws_ssl_context, build_ws_url
|
||||
|
||||
|
||||
async def subscribe_once(
    query: str,
    variables: dict[str, Any] | None = None,
    timeout: float = 10.0,  # noqa: ASYNC109
) -> dict[str, Any]:
    """Open a WebSocket subscription, receive the first data event, close, return it.

    Args:
        query: GraphQL subscription document to execute.
        variables: Optional GraphQL variables for the subscription.
        timeout: Seconds allowed for connect, handshake, and the first event.

    Returns:
        The ``data`` payload of the first matching subscription event.

    Raises:
        ToolError: On auth failure, GraphQL errors, or timeout.
    """
    url = build_ws_url()
    async with websockets.connect(
        url,
        subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
        open_timeout=timeout,
        ping_interval=20,
        ping_timeout=10,
        ssl=build_ws_ssl_context(url),
    ) as conn:
        # Server may negotiate either protocol; message type names differ between them.
        modern = (conn.subprotocol or "graphql-transport-ws") == "graphql-transport-ws"
        op_id = "snapshot-1"

        # --- connection_init handshake (API key as connectionParams when configured)
        handshake: dict[str, Any] = {"type": "connection_init"}
        if UNRAID_API_KEY:
            handshake["payload"] = {"x-api-key": UNRAID_API_KEY}
        await conn.send(json.dumps(handshake))

        ack = json.loads(await asyncio.wait_for(conn.recv(), timeout=timeout))
        if ack.get("type") == "connection_error":
            raise ToolError(f"Subscription auth failed: {ack.get('payload')}")
        if ack.get("type") != "connection_ack":
            raise ToolError(f"Unexpected handshake response: {ack.get('type')}")

        # --- start the subscription with the protocol-appropriate message type
        await conn.send(
            json.dumps(
                {
                    "id": op_id,
                    "type": "subscribe" if modern else "start",
                    "payload": {"query": query, "variables": variables or {}},
                }
            )
        )

        # --- wait for the first matching data event
        data_type = "next" if modern else "data"
        try:
            async with asyncio.timeout(timeout):
                async for frame in conn:
                    msg = json.loads(frame)
                    kind = msg.get("type")
                    if kind == "ping":
                        # Keep-alive for graphql-transport-ws.
                        await conn.send(json.dumps({"type": "pong"}))
                    elif msg.get("id") != op_id:
                        continue  # Frame for a different operation — ignore.
                    elif kind == data_type:
                        payload = msg.get("payload", {})
                        errors = payload.get("errors")
                        if errors:
                            msgs = "; ".join(e.get("message", str(e)) for e in errors)
                            raise ToolError(f"Subscription errors: {msgs}")
                        data = payload.get("data")
                        if data:
                            return data
                    elif kind == "error":
                        raise ToolError(f"Subscription error: {msg.get('payload')}")
        except TimeoutError:
            raise ToolError(f"Subscription timed out after {timeout:.0f}s") from None

    raise ToolError("WebSocket closed before receiving subscription data")
|
||||
|
||||
|
||||
async def subscribe_collect(
    query: str,
    variables: dict[str, Any] | None = None,
    collect_for: float = 5.0,
    timeout: float = 10.0,  # noqa: ASYNC109
) -> list[dict[str, Any]]:
    """Open a subscription, collect events for `collect_for` seconds, close, return list.

    Args:
        query: GraphQL subscription document to execute.
        variables: Optional GraphQL variables for the subscription.
        collect_for: Length of the collection window in seconds.
        timeout: Seconds allowed for connect and handshake.

    Returns:
        All data payloads received within the window; empty list if none arrive.
        The connection is always closed after the window expires.

    Raises:
        ToolError: On auth failure or GraphQL errors.
    """
    url = build_ws_url()
    collected: list[dict[str, Any]] = []

    async with websockets.connect(
        url,
        subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")],
        open_timeout=timeout,
        ping_interval=20,
        ping_timeout=10,
        ssl=build_ws_ssl_context(url),
    ) as conn:
        modern = (conn.subprotocol or "graphql-transport-ws") == "graphql-transport-ws"
        op_id = "snapshot-1"

        # connection_init handshake (API key as connectionParams when configured)
        handshake: dict[str, Any] = {"type": "connection_init"}
        if UNRAID_API_KEY:
            handshake["payload"] = {"x-api-key": UNRAID_API_KEY}
        await conn.send(json.dumps(handshake))

        ack = json.loads(await asyncio.wait_for(conn.recv(), timeout=timeout))
        if ack.get("type") == "connection_error":
            raise ToolError(f"Subscription auth failed: {ack.get('payload')}")
        if ack.get("type") != "connection_ack":
            raise ToolError(f"Unexpected handshake response: {ack.get('type')}")

        await conn.send(
            json.dumps(
                {
                    "id": op_id,
                    "type": "subscribe" if modern else "start",
                    "payload": {"query": query, "variables": variables or {}},
                }
            )
        )

        data_type = "next" if modern else "data"
        try:
            async with asyncio.timeout(collect_for):
                async for frame in conn:
                    msg = json.loads(frame)
                    if msg.get("type") == "ping":
                        await conn.send(json.dumps({"type": "pong"}))
                        continue
                    if msg.get("type") == data_type and msg.get("id") == op_id:
                        payload = msg.get("payload", {})
                        if errors := payload.get("errors"):
                            msgs = "; ".join(e.get("message", str(e)) for e in errors)
                            raise ToolError(f"Subscription errors: {msgs}")
                        if data := payload.get("data"):
                            collected.append(data)
        except TimeoutError:
            pass  # Collection window expired — return whatever was collected

    logger.debug(f"[SNAPSHOT] Collected {len(collected)} events in {collect_for}s window")
    return collected
|
||||
@@ -1,8 +1,41 @@
|
||||
"""Shared utilities for the subscription system."""
|
||||
|
||||
import ssl as _ssl
|
||||
from typing import Any
|
||||
|
||||
from ..config.settings import UNRAID_VERIFY_SSL
|
||||
from ..config.settings import UNRAID_API_URL, UNRAID_VERIFY_SSL
|
||||
|
||||
|
||||
def build_ws_url() -> str:
|
||||
"""Build a WebSocket URL from the configured UNRAID_API_URL.
|
||||
|
||||
Converts http(s) scheme to ws(s) and ensures /graphql path suffix.
|
||||
|
||||
Returns:
|
||||
The WebSocket URL string (e.g. "wss://10.1.0.2:31337/graphql").
|
||||
|
||||
Raises:
|
||||
ValueError: If UNRAID_API_URL is not configured or has an unrecognised scheme.
|
||||
"""
|
||||
if not UNRAID_API_URL:
|
||||
raise ValueError("UNRAID_API_URL is not configured")
|
||||
|
||||
if UNRAID_API_URL.startswith("https://"):
|
||||
ws_url = "wss://" + UNRAID_API_URL[len("https://") :]
|
||||
elif UNRAID_API_URL.startswith("http://"):
|
||||
ws_url = "ws://" + UNRAID_API_URL[len("http://") :]
|
||||
elif UNRAID_API_URL.startswith(("ws://", "wss://")):
|
||||
ws_url = UNRAID_API_URL # Already a WebSocket URL
|
||||
else:
|
||||
raise ValueError(
|
||||
f"UNRAID_API_URL must start with http://, https://, ws://, or wss://. "
|
||||
f"Got: {UNRAID_API_URL[:20]}..."
|
||||
)
|
||||
|
||||
if not ws_url.endswith("/graphql"):
|
||||
ws_url = ws_url.rstrip("/") + "/graphql"
|
||||
|
||||
return ws_url
|
||||
|
||||
|
||||
def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None:
|
||||
@@ -25,3 +58,41 @@ def build_ws_ssl_context(ws_url: str) -> _ssl.SSLContext | None:
|
||||
ctx.check_hostname = False
|
||||
ctx.verify_mode = _ssl.CERT_NONE
|
||||
return ctx
|
||||
|
||||
|
||||
def _analyze_subscription_status(
|
||||
status: dict[str, Any],
|
||||
) -> tuple[int, list[dict[str, Any]]]:
|
||||
"""Analyze subscription status dict, returning error count and connection issues.
|
||||
|
||||
Only reports connection_issues for subscriptions that are currently in a
|
||||
failure state (not recovered ones that happen to have a stale last_error).
|
||||
|
||||
Args:
|
||||
status: Dict of subscription name -> status info from get_subscription_status().
|
||||
|
||||
Returns:
|
||||
Tuple of (error_count, connection_issues_list).
|
||||
"""
|
||||
_error_states = frozenset(
|
||||
{"error", "auth_failed", "timeout", "max_retries_exceeded", "invalid_uri"}
|
||||
)
|
||||
error_count = 0
|
||||
connection_issues: list[dict[str, Any]] = []
|
||||
|
||||
for sub_name, sub_status in status.items():
|
||||
runtime = sub_status.get("runtime", {})
|
||||
conn_state = runtime.get("connection_state", "unknown")
|
||||
if conn_state in _error_states:
|
||||
error_count += 1
|
||||
# Gate on current failure state so recovered subscriptions are not reported
|
||||
if runtime.get("last_error") and conn_state in _error_states:
|
||||
connection_issues.append(
|
||||
{
|
||||
"subscription": sub_name,
|
||||
"state": conn_state,
|
||||
"error": runtime["last_error"],
|
||||
}
|
||||
)
|
||||
|
||||
return error_count, connection_issues
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user