diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index 58e93ab..b650f84 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -16,12 +16,7 @@ "--directory", "${CLAUDE_PLUGIN_ROOT}", "unraid-mcp-server" - ], - "env": { - "UNRAID_API_URL": "${UNRAID_API_URL}", - "UNRAID_API_KEY": "${UNRAID_API_KEY}", - "UNRAID_MCP_TRANSPORT": "stdio" - } + ] } } } diff --git a/.gitignore b/.gitignore index 072950d..766ee6d 100644 --- a/.gitignore +++ b/.gitignore @@ -21,13 +21,23 @@ coverage.xml # Virtual environments .venv .venv-backend + +# Environment files (only .env.example is tracked) .env -.env.local +.env.* +!.env.example + +# Logs *.log logs/ + +# IDE/Editor .bivvy .cursor +# Claude Code user settings (gitignore local settings) +.claude/settings.local.json + # Serena IDE configuration .serena/ @@ -36,6 +46,7 @@ logs/ .full-review/ /docs/plans/ /docs/sessions/ +/docs/reports/ # Test planning documents /DESTRUCTIVE_ACTIONS.md diff --git a/.plan.md b/.plan.md deleted file mode 100644 index 826e8a5..0000000 --- a/.plan.md +++ /dev/null @@ -1,544 +0,0 @@ -# Implementation Plan: mcporter Integration Tests + Destructive Action Gating - -**Date:** 2026-02-15 -**Status:** Awaiting Approval -**Estimated Effort:** 8-12 hours - -## Overview - -Implement comprehensive integration testing using mcporter CLI to validate all 86 tool actions (after removing 4 destructive array operations) against live Unraid servers, plus add environment variable gates for remaining destructive actions to prevent accidental operations. - -## Requirements - -1. **Remove destructive array operations** - start, stop, shutdown, reboot should not be exposed via MCP -2. **Add per-tool environment variable gates** - UNRAID_ALLOW_*_DESTRUCTIVE flags for remaining destructive actions -3. **Build mcporter test suite** - Real end-to-end testing of all 86 actions against live servers (tootie/shart) -4. 
**Document all actions** - Comprehensive action catalog with test specifications - -## Architecture Changes - -### 1. Settings Infrastructure (Pydantic-based) - -**File:** `unraid_mcp/config/settings.py` - -- Migrate from simple `os.getenv()` to Pydantic `BaseSettings` -- Add 7 destructive action gate flags (all default to False for safety): - - `allow_docker_destructive` (docker remove) - - `allow_vm_destructive` (vm force_stop, reset) - - `allow_notifications_destructive` (delete, delete_archived) - - `allow_rclone_destructive` (delete_remote) - - `allow_users_destructive` (user delete) - - `allow_keys_destructive` (key delete) - - `allow_array_destructive` (REMOVED - no longer needed after task 1) -- Add `get_config_summary()` method showing gate status -- Maintain backwards compatibility via module-level exports - -**Dependencies:** Add `pydantic-settings` to `pyproject.toml` - -### 2. Tool Implementation Pattern - -**Pattern for all tools with destructive actions:** - -```python -from ..config.settings import settings - -# In tool function: -if action in DESTRUCTIVE_ACTIONS: - # Check 1: Environment variable gate (first line of defense) - if not settings.allow_{tool}_destructive: - raise ToolError( - f"Destructive {tool} action '{action}' is disabled. " - f"Set UNRAID_ALLOW_{TOOL}_DESTRUCTIVE=true to enable. " - f"This is a safety gate to prevent accidental operations." - ) - - # Check 2: Runtime confirmation (second line of defense) - if not confirm: - raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") -``` - -**Tools requiring updates:** -- `unraid_mcp/tools/docker.py` (1 action: remove) -- `unraid_mcp/tools/virtualization.py` (2 actions: force_stop, reset) -- `unraid_mcp/tools/notifications.py` (2 actions: delete, delete_archived) -- `unraid_mcp/tools/rclone.py` (1 action: delete_remote) -- `unraid_mcp/tools/users.py` (1 action: delete) -- `unraid_mcp/tools/keys.py` (1 action: delete) - -### 3. 
mcporter Integration Test Suite - -**New Directory Structure:** - -``` -tests/integration/ -├── helpers/ -│ ├── mcporter.sh # mcporter wrapper (call_tool, call_destructive, get_field) -│ ├── validation.sh # Response validation (assert_fields, assert_equals, assert_success) -│ └── reporting.sh # Test reporting (init_report, record_test, generate_summary) -├── tools/ -│ ├── test_health.sh # 3 actions -│ ├── test_info.sh # 19 actions -│ ├── test_storage.sh # 6 actions -│ ├── test_docker.sh # 15 actions -│ ├── test_vm.sh # 9 actions -│ ├── test_notifications.sh # 9 actions -│ ├── test_rclone.sh # 4 actions -│ ├── test_users.sh # 8 actions -│ ├── test_keys.sh # 5 actions -│ └── test_array.sh # 8 actions (after removal) -├── run-all.sh # Master test runner (parallel/sequential) -├── run-tool.sh # Single tool runner -└── README.md # Integration test documentation -``` - -**mcporter Configuration:** `config/mcporter.json` - -```json -{ - "mcpServers": { - "unraid-tootie": { - "command": "uv", - "args": ["run", "unraid-mcp-server"], - "env": { - "UNRAID_API_URL": "https://myunraid.net:31337/graphql", - "UNRAID_API_KEY": "${UNRAID_TOOTIE_API_KEY}", - "UNRAID_VERIFY_SSL": "false", - "UNRAID_MCP_TRANSPORT": "stdio" - }, - "cwd": "/home/jmagar/workspace/unraid-mcp" - }, - "unraid-shart": { - "command": "uv", - "args": ["run", "unraid-mcp-server"], - "env": { - "UNRAID_API_URL": "http://100.118.209.1/graphql", - "UNRAID_API_KEY": "${UNRAID_SHART_API_KEY}", - "UNRAID_VERIFY_SSL": "false", - "UNRAID_MCP_TRANSPORT": "stdio" - }, - "cwd": "/home/jmagar/workspace/unraid-mcp" - } - } -} -``` - -## Implementation Tasks - -### Task 1: Remove Destructive Array Operations - -**Files:** -- `unraid_mcp/tools/array.py` -- `tests/test_array.py` - -**Changes:** -1. Remove from `MUTATIONS` dict: - - `start` (lines 24-28) - - `stop` (lines 29-33) - - `shutdown` (lines 69-73) - - `reboot` (lines 74-78) -2. Remove from `DESTRUCTIVE_ACTIONS` set (line 81) - set becomes empty `{}` -3. 
Remove from `ARRAY_ACTIONS` Literal type (lines 85-86) -4. Update docstring removing these 4 actions (lines 105-106, 115-116) -5. Remove tests for these actions in `tests/test_array.py` - -**Acceptance:** -- ✅ Array tool has 8 actions (down from 12) -- ✅ `DESTRUCTIVE_ACTIONS` is empty set -- ✅ Tests pass for remaining actions -- ✅ Removed mutations are not callable - -### Task 2: Add Pydantic Settings with Destructive Gates - -**Files:** -- `unraid_mcp/config/settings.py` -- `pyproject.toml` -- `.env.example` - -**Changes:** - -1. **Add dependency:** `pydantic-settings>=2.12` in `pyproject.toml` dependencies - -2. **Update settings.py:** - - Import `BaseSettings` from `pydantic_settings` - - Create `UnraidSettings` class with all config fields - - Add 6 destructive gate fields (all default to False): - - `allow_docker_destructive: bool = Field(default=False, ...)` - - `allow_vm_destructive: bool = Field(default=False, ...)` - - `allow_notifications_destructive: bool = Field(default=False, ...)` - - `allow_rclone_destructive: bool = Field(default=False, ...)` - - `allow_users_destructive: bool = Field(default=False, ...)` - - `allow_keys_destructive: bool = Field(default=False, ...)` - - Add `get_config_summary()` method including gate status - - Instantiate global `settings = UnraidSettings()` - - Keep backwards compatibility exports - -3. **Update .env.example:** Add section documenting all destructive gates - -**Acceptance:** -- ✅ `settings` instance loads successfully -- ✅ All gate fields default to False -- ✅ `get_config_summary()` shows gate status -- ✅ Backwards compatibility maintained (existing code still works) - -### Task 3: Update Tools with Environment Variable Gates - -**Files to update:** -- `unraid_mcp/tools/docker.py` -- `unraid_mcp/tools/virtualization.py` -- `unraid_mcp/tools/notifications.py` -- `unraid_mcp/tools/rclone.py` -- `unraid_mcp/tools/users.py` -- `unraid_mcp/tools/keys.py` - -**Pattern for each tool:** - -1. 
Add import: `from ..config.settings import settings` -2. Add gate check before confirm check in destructive action handler: - ```python - if action in DESTRUCTIVE_ACTIONS: - if not settings.allow_{tool}_destructive: - raise ToolError( - f"Destructive {tool} action '{action}' is disabled. " - f"Set UNRAID_ALLOW_{TOOL}_DESTRUCTIVE=true to enable." - ) - if not confirm: - raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") - ``` -3. Update tool docstring documenting security requirements - -**Acceptance (per tool):** -- ✅ Destructive action fails with clear error when env var not set -- ✅ Destructive action still requires confirm=True when env var is set -- ✅ Both checks must pass for execution -- ✅ Error messages guide user to correct env var - -### Task 4: Update Test Suite with Settings Mocking - -**Files:** -- `tests/conftest.py` -- `tests/test_docker.py` -- `tests/test_vm.py` -- `tests/test_notifications.py` -- `tests/test_rclone.py` -- `tests/test_users.py` -- `tests/test_keys.py` - -**Changes:** - -1. **Add fixtures to conftest.py:** - ```python - @pytest.fixture - def mock_settings(): - # All gates disabled - - @pytest.fixture - def mock_settings_all_enabled(mock_settings): - # All gates enabled - ``` - -2. **Update each test file:** - - Add `mock_settings` parameter to fixtures - - Wrap tool calls with `with patch("unraid_mcp.tools.{tool}.settings", mock_settings):` - - Add 3 destructive action tests: - - Test gate check (env var not set, confirm=True → fails) - - Test confirm check (env var set, confirm=False → fails) - - Test success (env var set, confirm=True → succeeds) - -**Acceptance:** -- ✅ All 150 existing tests pass -- ✅ New gate tests cover all destructive actions -- ✅ Tests verify correct error messages -- ✅ Tests use mocked settings (don't rely on actual env vars) - -### Task 5: Create mcporter Configuration - -**Files:** -- `config/mcporter.json` (new) -- `tests/integration/README.md` (new) - -**Changes:** - -1. 
Create `config/mcporter.json` with tootie and shart server configs -2. Document how to use mcporter with the server in README -3. Include instructions for loading credentials from `~/workspace/homelab/.env` - -**Acceptance:** -- ✅ `mcporter list unraid-tootie` shows all tools -- ✅ `mcporter call unraid-tootie.unraid_health action=test_connection` succeeds -- ✅ Configuration works for both servers - -### Task 6: Build mcporter Helper Libraries - -**Files to create:** -- `tests/integration/helpers/mcporter.sh` -- `tests/integration/helpers/validation.sh` -- `tests/integration/helpers/reporting.sh` - -**Functions to implement:** - -**mcporter.sh:** -- `call_tool [params...]` - Call tool via mcporter, return JSON -- `call_destructive [params...]` - Safe destructive call -- `get_field ` - Extract field from JSON -- `is_success ` - Check if response indicates success -- `get_error ` - Extract error message - -**validation.sh:** -- `assert_fields ...` - Verify required fields exist -- `assert_equals ` - Field value equality -- `assert_matches ` - Field matches regex -- `assert_success ` - Response indicates success -- `assert_failure [pattern]` - Response indicates failure (negative test) - -**reporting.sh:** -- `init_report ` - Initialize JSON report file -- `record_test [error]` - Record test result -- `generate_summary` - Generate console summary from all reports - -**Acceptance:** -- ✅ Helper functions work correctly -- ✅ Error handling is robust -- ✅ Functions are reusable across all tool tests - -### Task 7: Implement Tool Test Scripts - -**Files to create:** -- `tests/integration/tools/test_health.sh` (3 actions) -- `tests/integration/tools/test_info.sh` (19 actions) -- `tests/integration/tools/test_storage.sh` (6 actions) -- `tests/integration/tools/test_docker.sh` (15 actions) -- `tests/integration/tools/test_vm.sh` (9 actions) -- `tests/integration/tools/test_notifications.sh` (9 actions) -- `tests/integration/tools/test_rclone.sh` (4 actions) -- 
`tests/integration/tools/test_users.sh` (8 actions) -- `tests/integration/tools/test_keys.sh` (5 actions) -- `tests/integration/tools/test_array.sh` (8 actions) - -**Per-script implementation:** - -1. Source helper libraries -2. Initialize report -3. Implement test functions for each action: - - Basic functionality test - - Response structure validation - - Parameter validation - - Destructive action gate tests (if applicable) -4. Run all tests and record results -5. Return exit code based on failures - -**Priority order (implement in this sequence):** -1. `test_health.sh` - Simplest (3 actions, no destructive) -2. `test_info.sh` - Large but straightforward (19 query actions) -3. `test_storage.sh` - Moderate (6 query actions) -4. `test_docker.sh` - Complex (15 actions, 1 destructive) -5. `test_vm.sh` - Complex (9 actions, 2 destructive) -6. `test_notifications.sh` - Moderate (9 actions, 2 destructive) -7. `test_rclone.sh` - Simple (4 actions, 1 destructive) -8. `test_users.sh` - Moderate (8 actions, 1 destructive) -9. `test_keys.sh` - Simple (5 actions, 1 destructive) -10. 
`test_array.sh` - Moderate (8 actions, no destructive after removal) - -**Acceptance:** -- ✅ Each script tests all actions for its tool -- ✅ Tests validate response structure -- ✅ Destructive action gates are tested -- ✅ Scripts generate JSON reports -- ✅ Exit code indicates success/failure - -### Task 8: Build Test Runners - -**Files to create:** -- `tests/integration/run-all.sh` -- `tests/integration/run-tool.sh` - -**run-all.sh features:** -- Load credentials from `~/workspace/homelab/.env` -- Support sequential and parallel execution modes -- Run all 10 tool test scripts -- Generate summary report -- Return exit code based on any failures - -**run-tool.sh features:** -- Accept tool name as argument -- Load credentials -- Execute single tool test script -- Pass through exit code - -**Acceptance:** -- ✅ `run-all.sh` executes all tool tests -- ✅ Parallel mode works correctly (no race conditions) -- ✅ Summary report shows pass/fail/skip counts -- ✅ `run-tool.sh health` runs only health tests -- ✅ Exit codes are correct - -### Task 9: Document Action Catalog - -**File to create:** -- `docs/testing/action-catalog.md` - -**Content:** -- Table of all 86 actions across 10 tools -- For each action: - - Tool name - - Action name - - Type (query/mutation/compound) - - Required parameters - - Optional parameters - - Destructive? 
(yes/no + env var if yes) - - Expected response structure - - Example mcporter call - - Validation criteria - -**Acceptance:** -- ✅ All 86 actions documented -- ✅ Specifications are detailed and accurate -- ✅ Examples are runnable -- ✅ Becomes source of truth for test implementation - -### Task 10: Integration Documentation - -**Files to create/update:** -- `tests/integration/README.md` -- `docs/testing/integration-tests.md` -- `docs/testing/test-environments.md` -- `README.md` (add integration test section) - -**Content:** -- How to run integration tests -- How to configure mcporter -- Server setup (tootie/shart) -- Environment variable gates -- Destructive action testing -- CI/CD integration -- Troubleshooting - -**Acceptance:** -- ✅ Clear setup instructions -- ✅ Examples for common use cases -- ✅ Integration with existing pytest docs -- ✅ CI/CD pipeline documented - -## Testing Strategy - -### Unit Tests (pytest - existing) -- **150 tests** across 10 tool modules -- Mock GraphQL responses -- Fast, isolated, offline -- Cover edge cases and error paths - -### Integration Tests (mcporter - new) -- **86 tests** (one per action) -- Real Unraid server calls -- Slow, dependent, online -- Validate actual API behavior - -### Test Matrix - -| Tool | Actions | pytest Tests | mcporter Tests | Destructive | -|------|---------|--------------|----------------|-------------| -| health | 3 | 10 | 3 | 0 | -| info | 19 | 98 | 19 | 0 | -| storage | 6 | 11 | 6 | 0 | -| docker | 15 | 28 | 15 | 1 | -| vm | 9 | 25 | 9 | 2 | -| notifications | 9 | 7 | 9 | 2 | -| rclone | 4 | (pending) | 4 | 1 | -| users | 8 | (pending) | 8 | 1 | -| keys | 5 | (pending) | 5 | 1 | -| array | 8 | 26 | 8 | 0 | -| **TOTAL** | **86** | **~150** | **86** | **8** | - -## Validation Checklist - -### Code Changes -- [ ] Array tool has 8 actions (removed start/stop/shutdown/reboot) -- [ ] Settings class with 6 destructive gate flags -- [ ] All 6 tools updated with environment variable gates -- [ ] All 6 tool tests 
updated with gate test cases -- [ ] All existing 150 pytest tests pass -- [ ] `pydantic-settings` added to dependencies -- [ ] `.env.example` updated with gate documentation - -### Integration Tests -- [ ] mcporter configuration works for both servers -- [ ] All 3 helper libraries implemented -- [ ] All 10 tool test scripts implemented -- [ ] Test runners (run-all, run-tool) work correctly -- [ ] All 86 actions have test coverage -- [ ] Destructive action gates are tested -- [ ] Reports generate correctly - -### Documentation -- [ ] Action catalog documents all 86 actions -- [ ] Integration test README is clear -- [ ] Environment setup documented -- [ ] CI/CD integration documented -- [ ] Project README updated - -## Success Criteria - -1. **Safety:** Destructive actions require both env var AND confirm=True -2. **Coverage:** All 86 actions have integration tests -3. **Quality:** Clear error messages guide users to correct env vars -4. **Automation:** Test suite runs via single command -5. 
**Documentation:** Complete action catalog and testing guide - -## Risks & Mitigations - -### Risk: Breaking existing deployments -**Impact:** HIGH - Users suddenly can't execute destructive actions -**Mitigation:** -- Clear error messages with exact env var to set -- Document migration in release notes -- Default to disabled (safe) but guide users to enable - -### Risk: Integration tests are flaky -**Impact:** MEDIUM - CI/CD unreliable -**Mitigation:** -- Test against stable servers (tootie/shart) -- Implement retry logic for network errors -- Skip destructive tests if env vars not set (not failures) - -### Risk: mcporter configuration complexity -**Impact:** LOW - Difficult for contributors to run tests -**Mitigation:** -- Clear setup documentation -- Example .env template -- Helper script to validate setup - -## Dependencies - -- `pydantic-settings>=2.12` (Python package) -- `mcporter` (npm package - user must install) -- `jq` (system package for JSON parsing in bash) -- Access to tootie/shart servers (for integration tests) -- Credentials in `~/workspace/homelab/.env` - -## Timeline Estimate - -| Task | Estimated Time | -|------|---------------| -| 1. Remove array ops | 30 min | -| 2. Add settings infrastructure | 1 hour | -| 3. Update tools with gates | 2 hours | -| 4. Update test suite | 2 hours | -| 5. mcporter config | 30 min | -| 6. Helper libraries | 1.5 hours | -| 7. Tool test scripts | 4 hours | -| 8. Test runners | 1 hour | -| 9. Action catalog | 2 hours | -| 10. 
Documentation | 1.5 hours | -| **Total** | **~12 hours** | - -## Notes - -- Integration tests complement (not replace) existing pytest suite -- Tests validate actual Unraid API behavior, not just our code -- Environment variable gates provide defense-in-depth security -- mcporter enables real-world validation impossible with mocked tests -- Action catalog becomes living documentation for all tools - ---- - -**Plan Status:** Awaiting user approval -**Next Step:** Review plan, make adjustments, then execute via task list diff --git a/CLAUDE.md b/CLAUDE.md index e60e1d3..9a28b18 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -84,15 +84,15 @@ docker compose down - **Health Monitoring**: Comprehensive health check tool for system monitoring - **Real-time Subscriptions**: WebSocket-based live data streaming -### Tool Categories (10 Tools, 90 Actions) +### Tool Categories (10 Tools, 76 Actions) 1. **`unraid_info`** (19 actions): overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config -2. **`unraid_array`** (12 actions): start, stop, parity_start/pause/resume/cancel/history, mount_disk, unmount_disk, clear_stats, shutdown, reboot +2. **`unraid_array`** (5 actions): parity_start, parity_pause, parity_resume, parity_cancel, parity_status 3. **`unraid_storage`** (6 actions): shares, disks, disk_details, unassigned, log_files, logs 4. **`unraid_docker`** (15 actions): list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates 5. **`unraid_vm`** (9 actions): list, details, start, stop, pause, resume, force_stop, reboot, reset 6. **`unraid_notifications`** (9 actions): overview, list, warnings, create, archive, unread, delete, delete_archived, archive_all 7. **`unraid_rclone`** (4 actions): list_remotes, config_form, create_remote, delete_remote -8. 
**`unraid_users`** (8 actions): me, list, get, add, delete, cloud, remote_access, origins +8. **`unraid_users`** (1 action): me 9. **`unraid_keys`** (5 actions): list, get, create, update, delete 10. **`unraid_health`** (3 actions): check, test_connection, diagnose diff --git a/README.md b/README.md index 367016a..cb5895d 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ - [Installation](#-installation) - [Configuration](#-configuration) - [Available Tools & Resources](#-available-tools--resources) +- [Custom Slash Commands](#-custom-slash-commands) - [Development](#-development) - [Architecture](#-architecture) - [Troubleshooting](#-troubleshooting) @@ -45,10 +46,11 @@ ``` This provides instant access to Unraid monitoring and management through Claude Code with: -- 10 tools exposing 90 actions via the consolidated action pattern -- Real-time system metrics -- Disk health monitoring -- Docker and VM management +- **10 MCP tools** exposing **76 actions** via the consolidated action pattern +- **10 slash commands** for quick CLI-style access (`commands/`) +- Real-time system metrics and health monitoring +- Docker container and VM lifecycle management +- Disk health monitoring and storage management **See [.claude-plugin/README.md](.claude-plugin/README.md) for detailed plugin documentation.** @@ -102,13 +104,15 @@ unraid-mcp/ # ${CLAUDE_PLUGIN_ROOT} ├── .claude-plugin/ │ ├── marketplace.json # Marketplace catalog │ └── plugin.json # Plugin manifest +├── commands/ # 10 custom slash commands ├── unraid_mcp/ # MCP server Python package ├── skills/unraid/ # Skill and documentation ├── pyproject.toml # Dependencies and entry points └── scripts/ # Validation and helper scripts ``` -- **MCP Server**: 10 tools with 90 actions via GraphQL API +- **MCP Server**: 10 tools with 76 actions via GraphQL API +- **Slash Commands**: 10 commands in `commands/` for quick CLI-style access - **Skill**: `/unraid` skill for monitoring and queries - **Entry Point**: 
`unraid-mcp-server` defined in pyproject.toml @@ -214,18 +218,18 @@ UNRAID_VERIFY_SSL=true # true, false, or path to CA bundle Each tool uses a consolidated `action` parameter to expose multiple operations, reducing context window usage. Destructive actions require `confirm=True`. -### Tool Categories (10 Tools, 90 Actions) +### Tool Categories (10 Tools, 76 Actions) | Tool | Actions | Description | |------|---------|-------------| | **`unraid_info`** | 19 | overview, array, network, registration, connect, variables, metrics, services, display, config, online, owner, settings, server, servers, flash, ups_devices, ups_device, ups_config | -| **`unraid_array`** | 12 | start, stop, parity_start/pause/resume/cancel/history, mount_disk, unmount_disk, clear_stats, shutdown, reboot | +| **`unraid_array`** | 5 | parity_start, parity_pause, parity_resume, parity_cancel, parity_status | | **`unraid_storage`** | 6 | shares, disks, disk_details, unassigned, log_files, logs | | **`unraid_docker`** | 15 | list, details, start, stop, restart, pause, unpause, remove, update, update_all, logs, networks, network_details, port_conflicts, check_updates | | **`unraid_vm`** | 9 | list, details, start, stop, pause, resume, force_stop, reboot, reset | | **`unraid_notifications`** | 9 | overview, list, warnings, create, archive, unread, delete, delete_archived, archive_all | | **`unraid_rclone`** | 4 | list_remotes, config_form, create_remote, delete_remote | -| **`unraid_users`** | 8 | me, list, get, add, delete, cloud, remote_access, origins | +| **`unraid_users`** | 1 | me | | **`unraid_keys`** | 5 | list, get, create, update, delete | | **`unraid_health`** | 3 | check, test_connection, diagnose | @@ -236,6 +240,64 @@ Each tool uses a consolidated `action` parameter to expose multiple operations, --- +## 💬 Custom Slash Commands + +The project includes **10 custom slash commands** in `commands/` for quick access to Unraid operations: + +### Available Commands + +| Command | Actions | 
Quick Access | +|---------|---------|--------------| +| `/info` | 19 | System information, metrics, configuration | +| `/array` | 5 | Parity check management | +| `/storage` | 6 | Shares, disks, logs | +| `/docker` | 15 | Container management and monitoring | +| `/vm` | 9 | Virtual machine lifecycle | +| `/notifications` | 9 | Alert management | +| `/rclone` | 4 | Cloud storage remotes | +| `/users` | 1 | Current user query | +| `/keys` | 5 | API key management | +| `/health` | 3 | System health checks | + +### Example Usage + +```bash +# System monitoring +/info overview +/health check +/storage shares + +# Container management +/docker list +/docker start plex +/docker logs nginx + +# VM operations +/vm list +/vm start windows-10 + +# Notifications +/notifications warnings +/notifications archive_all + +# User management +/users me +/keys create "Automation Key" "For CI/CD" +``` + +### Command Features + +Each slash command provides: +- **Comprehensive documentation** of all available actions +- **Argument hints** for required parameters +- **Safety warnings** for destructive operations (⚠️) +- **Usage examples** for common scenarios +- **Action categorization** (Query, Lifecycle, Management, Destructive) + +Run any command without arguments to see full documentation, or type `/help` to list all available commands. 
+ +--- + ## 🔧 Development @@ -255,15 +317,15 @@ unraid-mcp/ │ │ ├── manager.py # WebSocket management │ │ ├── resources.py # MCP resources │ │ └── diagnostics.py # Diagnostic tools -│ ├── tools/ # MCP tool categories (10 tools, 90 actions) +│ ├── tools/ # MCP tool categories (10 tools, 76 actions) │ │ ├── info.py # System information (19 actions) -│ │ ├── array.py # Array management (12 actions) +│ │ ├── array.py # Parity checks (5 actions) │ │ ├── storage.py # Storage & monitoring (6 actions) │ │ ├── docker.py # Container management (15 actions) │ │ ├── virtualization.py # VM management (9 actions) │ │ ├── notifications.py # Notification management (9 actions) │ │ ├── rclone.py # Cloud storage (4 actions) -│ │ ├── users.py # User management (8 actions) +│ │ ├── users.py # Current user query (1 action) │ │ ├── keys.py # API key management (5 actions) │ │ └── health.py # Health checks (3 actions) │ └── server.py # FastMCP server setup @@ -284,6 +346,20 @@ uv run ty check unraid_mcp/ uv run pytest ``` +### API Schema Docs Automation +```bash +# Regenerate complete GraphQL schema reference from live introspection +set -a; source .env; set +a +uv run python scripts/generate_unraid_api_reference.py +``` + +This updates `docs/UNRAID_API_COMPLETE_REFERENCE.md` with all operations, directives, and types visible to your API key. 
+ +Optional cron example (daily at 03:15): +```bash +15 3 * * * cd /path/to/unraid-mcp && /usr/bin/env bash -lc 'set -a; source .env; set +a; uv run python scripts/generate_unraid_api_reference.py && git add docs/UNRAID_API_COMPLETE_REFERENCE.md && git commit -m "docs: refresh unraid graphql schema"' +``` + ### Development Workflow ```bash # Start development server @@ -379,4 +455,4 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file --- -*Built with ❤️ for the Unraid community* \ No newline at end of file +*Built with ❤️ for the Unraid community* diff --git a/commands/array.md b/commands/array.md new file mode 100644 index 0000000..1b294e9 --- /dev/null +++ b/commands/array.md @@ -0,0 +1,30 @@ +--- +description: Manage Unraid array parity checks +argument-hint: [action] [correct=true/false] +--- + +Execute the `unraid_array` MCP tool with action: `$1` + +## Available Actions (5) + +**Parity Check Operations:** +- `parity_start` - Start parity check/sync (optional: correct=true to fix errors) +- `parity_pause` - Pause running parity operation +- `parity_resume` - Resume paused parity operation +- `parity_cancel` - Cancel running parity operation +- `parity_status` - Get current parity check status + +## Example Usage + +``` +/array parity_start +/array parity_start correct=true +/array parity_pause +/array parity_resume +/array parity_cancel +/array parity_status +``` + +**Note:** Use `correct=true` with `parity_start` to automatically fix any parity errors found during the check. + +Use the tool to execute the requested parity operation and report the results. 
diff --git a/commands/docker.md b/commands/docker.md new file mode 100644 index 0000000..95b753d --- /dev/null +++ b/commands/docker.md @@ -0,0 +1,48 @@ +--- +description: Manage Docker containers on Unraid +argument-hint: [action] [additional-args] +--- + +Execute the `unraid_docker` MCP tool with action: `$1` + +## Available Actions (15) + +**Query Operations:** +- `list` - List all Docker containers with status +- `details` - Get detailed info for a container (requires container identifier) +- `logs` - Get container logs (requires container identifier) +- `check_updates` - Check for available container updates +- `port_conflicts` - Identify port conflicts +- `networks` - List Docker networks +- `network_details` - Get network details (requires network identifier) + +**Container Lifecycle:** +- `start` - Start a stopped container (requires container identifier) +- `stop` - Stop a running container (requires container identifier) +- `restart` - Restart a container (requires container identifier) +- `pause` - Pause a running container (requires container identifier) +- `unpause` - Unpause a paused container (requires container identifier) + +**Updates & Management:** +- `update` - Update a specific container (requires container identifier) +- `update_all` - Update all containers with available updates + +**⚠️ Destructive:** +- `remove` - Permanently delete a container (requires container identifier + confirmation) + +## Example Usage + +``` +/docker list +/docker details plex +/docker logs plex +/docker start nginx +/docker restart sonarr +/docker check_updates +/docker update plex +/docker port_conflicts +``` + +**Container Identification:** Use container name, ID, or partial match (fuzzy search supported) + +Use the tool to execute the requested Docker operation and report the results. 
diff --git a/commands/health.md b/commands/health.md new file mode 100644 index 0000000..526088a --- /dev/null +++ b/commands/health.md @@ -0,0 +1,59 @@ +--- +description: Check Unraid system health and connectivity +argument-hint: [action] +--- + +Execute the `unraid_health` MCP tool with action: `$1` + +## Available Actions (3) + +**Health Monitoring:** +- `check` - Comprehensive health check of all system components +- `test_connection` - Test basic API connectivity +- `diagnose` - Detailed diagnostic information for troubleshooting + +## What Each Action Checks + +### `check` - System Health +- API connectivity and response time +- Array status and disk health +- Running services status +- Docker container health +- VM status +- System resources (CPU, RAM, disk I/O) +- Network connectivity +- UPS status (if configured) + +Returns: Overall health status (`HEALTHY`, `WARNING`, `CRITICAL`) with component details + +### `test_connection` - Connectivity +- GraphQL endpoint availability +- Authentication validity +- Basic query execution +- Network latency + +Returns: Connection status and latency metrics + +### `diagnose` - Diagnostic Details +- Full system configuration +- Resource utilization trends +- Error logs and warnings +- Component-level diagnostics +- Troubleshooting recommendations + +Returns: Detailed diagnostic report + +## Example Usage + +``` +/health check +/health test_connection +/health diagnose +``` + +**Use Cases:** +- `check` - Quick health status (monitoring dashboards) +- `test_connection` - Verify API access (troubleshooting) +- `diagnose` - Deep dive debugging (issue resolution) + +Use the tool to execute the requested health check and present results with clear severity indicators. 
diff --git a/commands/info.md b/commands/info.md new file mode 100644 index 0000000..6fd79f3 --- /dev/null +++ b/commands/info.md @@ -0,0 +1,50 @@ +--- +description: Query Unraid server information and configuration +argument-hint: [action] [additional-args] +--- + +Execute the `unraid_info` MCP tool with action: `$1` + +## Available Actions (19) + +**System Overview:** +- `overview` - Complete system summary with all key metrics +- `server` - Server details (hostname, version, uptime) +- `servers` - List all known Unraid servers + +**Array & Storage:** +- `array` - Array status, disks, and health + +**Network & Registration:** +- `network` - Network configuration and interfaces +- `registration` - Registration status and license info +- `connect` - Connect service configuration +- `online` - Online status check + +**Configuration:** +- `config` - System configuration settings +- `settings` - User settings and preferences +- `variables` - Environment variables +- `display` - Display settings + +**Services & Monitoring:** +- `services` - Running services status +- `metrics` - System metrics (CPU, RAM, disk I/O) +- `ups_devices` - List all UPS devices +- `ups_device` - Get specific UPS device details (requires device_id) +- `ups_config` - UPS configuration + +**Ownership:** +- `owner` - Server owner information +- `flash` - USB flash drive details + +## Example Usage + +``` +/unraid-info overview +/unraid-info array +/unraid-info metrics +/unraid-info ups_device [device-id] +``` + +Use the tool to retrieve the requested information and present it in a clear, formatted manner. 
diff --git a/commands/keys.md b/commands/keys.md new file mode 100644 index 0000000..56bf8f8 --- /dev/null +++ b/commands/keys.md @@ -0,0 +1,37 @@ +--- +description: Manage Unraid API keys for authentication +argument-hint: [action] [key-id] +--- + +Execute the `unraid_keys` MCP tool with action: `$1` + +## Available Actions (5) + +**Query Operations:** +- `list` - List all API keys with metadata +- `get` - Get details for a specific API key (requires key_id) + +**Management Operations:** +- `create` - Create a new API key (requires name, optional description and expiry) +- `update` - Update an existing API key (requires key_id, name, description) + +**⚠️ Destructive:** +- `delete` - Permanently revoke an API key (requires key_id + confirmation) + +## Example Usage + +``` +/unraid-keys list +/unraid-keys get [key-id] +/unraid-keys create "MCP Server Key" "Key for unraid-mcp integration" +/unraid-keys update [key-id] "Updated Name" "Updated description" +``` + +**Key Format:** PrefixedID (`hex64:suffix`) + +**IMPORTANT:** +- Deleted keys are immediately revoked and cannot be recovered +- Store new keys securely - they're only shown once during creation +- Set expiry dates for keys used in automation + +Use the tool to execute the requested API key operation and report the results. 
diff --git a/commands/notifications.md b/commands/notifications.md new file mode 100644 index 0000000..84716c4 --- /dev/null +++ b/commands/notifications.md @@ -0,0 +1,41 @@ +--- +description: Manage Unraid system notifications and alerts +argument-hint: [action] [additional-args] +--- + +Execute the `unraid_notifications` MCP tool with action: `$1` + +## Available Actions (9) + +**Query Operations:** +- `overview` - Summary of notification counts by category +- `list` - List all notifications with details +- `warnings` - List only warning/error notifications +- `unread` - List unread notifications only + +**Management Operations:** +- `create` - Create a new notification (requires title, message, severity) +- `archive` - Archive a specific notification (requires notification_id) +- `archive_all` - Archive all current notifications + +**⚠️ Destructive Operations:** +- `delete` - Permanently delete a notification (requires notification_id + confirmation) +- `delete_archived` - Permanently delete all archived notifications (requires confirmation) + +## Example Usage + +``` +/unraid-notifications overview +/unraid-notifications list +/unraid-notifications warnings +/unraid-notifications unread +/unraid-notifications create "Test Alert" "This is a test" normal +/unraid-notifications archive [notification-id] +/unraid-notifications archive_all +``` + +**Severity Levels:** `normal`, `warning`, `alert`, `critical` + +**IMPORTANT:** Delete operations are permanent and cannot be undone. + +Use the tool to execute the requested notification operation and present results clearly. 
diff --git a/commands/rclone.md b/commands/rclone.md new file mode 100644 index 0000000..68124e4 --- /dev/null +++ b/commands/rclone.md @@ -0,0 +1,32 @@ +--- +description: Manage Rclone cloud storage remotes on Unraid +argument-hint: [action] [remote-name] +--- + +Execute the `unraid_rclone` MCP tool with action: `$1` + +## Available Actions (4) + +**Query Operations:** +- `list_remotes` - List all configured Rclone remotes +- `config_form` - Get configuration form for a remote type (requires remote_type) + +**Management Operations:** +- `create_remote` - Create a new Rclone remote (requires remote_name, remote_type, config) + +**⚠️ Destructive:** +- `delete_remote` - Permanently delete a remote (requires remote_name + confirmation) + +## Example Usage + +``` +/unraid-rclone list_remotes +/unraid-rclone config_form s3 +/unraid-rclone create_remote mybackup s3 {"access_key":"...","secret_key":"..."} +``` + +**Supported Remote Types:** s3, dropbox, google-drive, onedrive, backblaze, ftp, sftp, webdav, etc. + +**IMPORTANT:** Deleting a remote does NOT delete cloud data, only the local configuration. + +Use the tool to execute the requested Rclone operation and report the results. 
diff --git a/commands/storage.md b/commands/storage.md new file mode 100644 index 0000000..37acb37 --- /dev/null +++ b/commands/storage.md @@ -0,0 +1,33 @@ +--- +description: Query Unraid storage, shares, and disk information +argument-hint: [action] [additional-args] +--- + +Execute the `unraid_storage` MCP tool with action: `$1` + +## Available Actions (6) + +**Shares & Disks:** +- `shares` - List all user shares with sizes and allocation +- `disks` - List all disks in the array +- `disk_details` - Get detailed info for a specific disk (requires disk identifier) +- `unassigned` - List unassigned devices + +**Logs:** +- `log_files` - List available system log files +- `logs` - Read log file contents (requires log file path) + +## Example Usage + +``` +/unraid-storage shares +/unraid-storage disks +/unraid-storage disk_details disk1 +/unraid-storage unassigned +/unraid-storage log_files +/unraid-storage logs /var/log/syslog +``` + +**Note:** Log file paths must start with `/var/log/`, `/boot/logs/`, or `/mnt/` + +Use the tool to retrieve the requested storage information and present it clearly. diff --git a/commands/users.md b/commands/users.md new file mode 100644 index 0000000..b4a1033 --- /dev/null +++ b/commands/users.md @@ -0,0 +1,31 @@ +--- +description: Query current authenticated Unraid user +argument-hint: [action] +--- + +Execute the `unraid_users` MCP tool with action: `$1` + +## Available Actions (1) + +**Query Operation:** +- `me` - Get current authenticated user info (id, name, description, roles) + +## Example Usage + +``` +/unraid-users me +``` + +## API Limitation + +⚠️ **Note:** The Unraid GraphQL API does not support user management operations. Only the `me` query is available, which returns information about the currently authenticated user (the API key holder). + +**Not supported:** +- Listing all users +- Getting other user details +- Adding/deleting users +- Cloud/remote access queries + +For user management, use the Unraid web UI. 
+ +Use the tool to query the current authenticated user and report the results. diff --git a/commands/vm.md b/commands/vm.md new file mode 100644 index 0000000..78923e0 --- /dev/null +++ b/commands/vm.md @@ -0,0 +1,41 @@ +--- +description: Manage virtual machines on Unraid +argument-hint: [action] [vm-id] +--- + +Execute the `unraid_vm` MCP tool with action: `$1` and vm_id: `$2` + +## Available Actions (9) + +**Query Operations:** +- `list` - List all VMs with status and resource allocation +- `details` - Get detailed info for a VM (requires vm_id) + +**Lifecycle Operations:** +- `start` - Start a stopped VM (requires vm_id) +- `stop` - Gracefully stop a running VM (requires vm_id) +- `pause` - Pause a running VM (requires vm_id) +- `resume` - Resume a paused VM (requires vm_id) +- `reboot` - Gracefully reboot a VM (requires vm_id) + +**⚠️ Destructive Operations:** +- `force_stop` - Forcefully power off VM (like pulling power cord - requires vm_id + confirmation) +- `reset` - Hard reset VM (power cycle without graceful shutdown - requires vm_id + confirmation) + +## Example Usage + +``` +/unraid-vm list +/unraid-vm details windows-10 +/unraid-vm start ubuntu-server +/unraid-vm stop windows-10 +/unraid-vm pause debian-vm +/unraid-vm resume debian-vm +/unraid-vm reboot ubuntu-server +``` + +**VM Identification:** Use VM ID (PrefixedID format: `hex64:suffix`) + +**IMPORTANT:** `force_stop` and `reset` bypass graceful shutdown and may corrupt VM filesystem. Use `stop` instead for safe shutdowns. + +Use the tool to execute the requested VM operation and report the results. diff --git a/docs/DESTRUCTIVE_ACTIONS.md b/docs/DESTRUCTIVE_ACTIONS.md new file mode 100644 index 0000000..0cfa78f --- /dev/null +++ b/docs/DESTRUCTIVE_ACTIONS.md @@ -0,0 +1,240 @@ +# Destructive Actions Inventory + +This file lists all destructive actions across the unraid-mcp tools. Fill in the "Testing Strategy" column to specify how each should be tested in the mcporter integration test suite. 
+ +**Last Updated:** 2026-02-15 + +--- + +## Summary + +- **Total Destructive Actions:** 8 (after removing 4 array operations) +- **Tools with Destructive Actions:** 6 +- **Environment Variable Gates:** 6 (one per tool) + +--- + +## Destructive Actions by Tool + +### 1. Docker (1 action) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `remove` | Permanently delete a Docker container | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_DOCKER_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Container must be stopped first +- Removes container config and any non-volume data +- Cannot be undone + +--- + +### 2. Virtual Machines (2 actions) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `force_stop` | Forcefully power off a running VM (equivalent to pulling power cord) | **MEDIUM** - Severe but recoverable, risk of data corruption | `UNRAID_ALLOW_VM_DESTRUCTIVE` | **TODO: Specify testing approach** | +| `reset` | Hard reset a VM (power cycle without graceful shutdown) | **MEDIUM** - Severe but recoverable, risk of data corruption | `UNRAID_ALLOW_VM_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Both bypass graceful shutdown procedures +- May corrupt VM filesystem if used during write operations +- Use `stop` action instead for graceful shutdown + +--- + +### 3. 
Notifications (2 actions) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `delete` | Permanently delete a notification | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_NOTIFICATIONS_DESTRUCTIVE` | **TODO: Specify testing approach** | +| `delete_archived` | Permanently delete all archived notifications | **HIGH** - Bulk data loss, irreversible | `UNRAID_ALLOW_NOTIFICATIONS_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Cannot recover deleted notifications +- `delete_archived` affects ALL archived notifications (bulk operation) + +--- + +### 4. Rclone (1 action) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `delete_remote` | Permanently delete an rclone remote configuration | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_RCLONE_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Removes cloud storage connection configuration +- Does NOT delete data in the remote storage +- Must reconfigure remote from scratch if deleted + +--- + +### 5. Users (1 action) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `delete` | Permanently delete a user account | **HIGH** - Data loss, irreversible | `UNRAID_ALLOW_USERS_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Removes user account and permissions +- Cannot delete the root user +- User's data may remain but become orphaned + +--- + +### 6. 
API Keys (1 action) + +| Action | Description | Risk Level | Env Var Gate | Testing Strategy | +|--------|-------------|------------|--------------|------------------| +| `delete` | Permanently delete an API key | **HIGH** - Data loss, irreversible, breaks integrations | `UNRAID_ALLOW_KEYS_DESTRUCTIVE` | **TODO: Specify testing approach** | + +**Notes:** +- Immediately revokes API key access +- Will break any integrations using the deleted key +- Cannot be undone - must create new key + +--- + +## Removed Actions (No Longer Exposed) + +These actions were previously marked as destructive but have been **removed** from the array tool per the implementation plan: + +| Action | Former Risk Level | Reason for Removal | +|--------|-------------------|-------------------| +| `start` | CRITICAL | System-wide impact - should not be exposed via MCP | +| `stop` | CRITICAL | System-wide impact - should not be exposed via MCP | +| `shutdown` | CRITICAL | System-wide impact - could cause data loss | +| `reboot` | CRITICAL | System-wide impact - disrupts all services | + +--- + +## Testing Strategy Options + +Choose one of the following for each action in the "Testing Strategy" column: + +### Option 1: Mock/Validation Only +- Test parameter validation +- Test `confirm=True` requirement +- Test env var gate requirement +- **DO NOT** execute the actual action + +### Option 2: Dry-Run Testing +- Test with `confirm=false` to verify rejection +- Test without env var to verify gate +- **DO NOT** execute with both gates passed + +### Option 3: Test Server Execution +- Execute on a dedicated test Unraid server (e.g., shart) +- Requires pre-created test resources (containers, VMs, notifications) +- Verify action succeeds and state changes as expected +- Clean up after test + +### Option 4: Manual Test Checklist +- Document manual verification steps +- Do not automate in mcporter suite +- Requires human operator to execute and verify + +### Option 5: Skip Testing +- Too dangerous to 
automate +- Rely on unit tests only +- Document why testing is skipped + +--- + +## Example Testing Strategies + +**Safe approach (recommended for most):** +``` +Option 1: Mock/Validation Only +- Verify action requires UNRAID_ALLOW_DOCKER_DESTRUCTIVE=true +- Verify action requires confirm=True +- Do not execute actual deletion +``` + +**Comprehensive approach (for test server only):** +``` +Option 3: Test Server Execution on 'shart' +- Create test container 'mcporter-test-container' +- Execute remove with gates enabled +- Verify container is deleted +- Clean up not needed (container already removed) +``` + +**Hybrid approach:** +``` +Option 1 + Option 4: Mock validation + Manual checklist +- Automated: Test gate requirements +- Manual: Human operator verifies on test server +``` + +--- + +## Usage in mcporter Tests + +Each tool test script will check the testing strategy: + +```bash +# Example from test_docker.sh +test_remove_action() { + local strategy="TODO: Specify testing approach" # From this file + + case "$strategy" in + *"Option 1"*|*"Mock"*) + # Mock/validation testing + test_remove_requires_env_var + test_remove_requires_confirm + ;; + *"Option 3"*|*"Test Server"*) + # Real execution on test server + if [[ "$UNRAID_TEST_SERVER" != "unraid-shart" ]]; then + echo "SKIP: Destructive test only runs on test server" + return 2 + fi + test_remove_real_execution + ;; + *"Option 5"*|*"Skip"*) + echo "SKIP: Testing disabled for this action" + return 2 + ;; + esac +} +``` + +--- + +## Security Model + +**Two-tier security for destructive actions:** + +1. **Environment Variable Gate** (first line of defense) + - Must be explicitly enabled per tool + - Defaults to disabled (safe) + - Prevents accidental execution + +2. **Runtime Confirmation** (second line of defense) + - Must pass `confirm=True` in each call + - Forces explicit acknowledgment per operation + - Cannot be cached or preset + +**Both must pass for execution.** + +--- + +## Next Steps + +1. 
**Fill in Testing Strategy column** for each action above +2. **Create test fixtures** if using Option 3 (test containers, VMs, etc.) +3. **Implement tool test scripts** following the specified strategies +4. **Document any special setup** required for destructive testing + +--- + +## Questions to Consider + +For each action, ask: +- Is this safe to automate on a test server? +- Do we have test fixtures/resources available? +- What cleanup is required after testing? +- What's the blast radius if something goes wrong? +- Can we verify the action worked without side effects? + diff --git a/MARKETPLACE.md b/docs/MARKETPLACE.md similarity index 100% rename from MARKETPLACE.md rename to docs/MARKETPLACE.md diff --git a/PUBLISHING.md b/docs/PUBLISHING.md similarity index 100% rename from PUBLISHING.md rename to docs/PUBLISHING.md diff --git a/docs/UNRAID_API_COMPLETE_REFERENCE.md b/docs/UNRAID_API_COMPLETE_REFERENCE.md new file mode 100644 index 0000000..a9abd61 --- /dev/null +++ b/docs/UNRAID_API_COMPLETE_REFERENCE.md @@ -0,0 +1,2393 @@ +# Unraid GraphQL API Complete Schema Reference + +Generated via live GraphQL introspection for the configured endpoint and API key. + +This is permission-scoped: it contains everything visible to the API key used. 
+ +## Table of Contents +- [Schema Summary](#schema-summary) +- [Root Operations](#root-operations) +- [Directives](#directives) +- [All Types (Alphabetical)](#all-types-alphabetical) + +## Schema Summary +- Query root: `Query` +- Mutation root: `Mutation` +- Subscription root: `Subscription` +- Total types: **156** +- Total directives: **6** +- Type kinds: +- `ENUM`: 30 +- `INPUT_OBJECT`: 16 +- `INTERFACE`: 2 +- `OBJECT`: 97 +- `SCALAR`: 10 +- `UNION`: 1 + +## Root Operations +### Queries +Total fields: **46** + +- `apiKey(id: PrefixedID!): ApiKey` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** +- `apiKeyPossiblePermissions(): [Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: All possible permissions for API keys +- `apiKeyPossibleRoles(): [Role!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: All possible roles for API keys +- `apiKeys(): [ApiKey!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** +- `array(): UnraidArray!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `config(): Config!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** +- `customization(): Customization` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CUSTOMIZATIONS** +- `disk(id: PrefixedID!): Disk!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** +- `disks(): [Disk!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** +- `docker(): Docker!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DOCKER** +- `flash(): Flash!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** +- `getApiKeyCreationFormSchema(): ApiKeyFormSettings!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** #### Description: Get JSON Schema for API 
key creation form +- `getAvailableAuthActions(): [AuthAction!]!` + - Get all available authentication actions with possession +- `getPermissionsForRoles(roles: [Role!]!): [Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Get the actual permissions that would be granted by a set of roles +- `info(): Info!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `isInitialSetup(): Boolean!` +- `isSSOEnabled(): Boolean!` +- `logFile(lines: Int, path: String!, startLine: Int): LogFileContent!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** +- `logFiles(): [LogFile!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** +- `me(): UserAccount!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ME** +- `metrics(): Metrics!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `notifications(): Notifications!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** #### Description: Get all notifications +- `oidcConfiguration(): OidcConfiguration!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get the full OIDC configuration (admin only) +- `oidcProvider(id: PrefixedID!): OidcProvider` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get a specific OIDC provider by ID +- `oidcProviders(): [OidcProvider!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get all configured OIDC providers (admin only) +- `online(): Boolean!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ONLINE** +- `owner(): Owner!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** +- `parityHistory(): [ParityCheck!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `plugins(): 
[Plugin!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: List all installed plugins with their metadata +- `previewEffectivePermissions(permissions: [AddPermissionInput!], roles: [Role!]): [Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Preview the effective permissions for a combination of roles and explicit permissions +- `publicOidcProviders(): [PublicOidcProvider!]!` + - Get public OIDC provider information for login buttons +- `publicPartnerInfo(): PublicPartnerInfo` +- `publicTheme(): Theme!` +- `rclone(): RCloneBackupSettings!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** +- `registration(): Registration` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **REGISTRATION** +- `server(): Server` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `servers(): [Server!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `services(): [Service!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVICES** +- `settings(): Settings!` +- `shares(): [Share!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SHARE** +- `upsConfiguration(): UPSConfiguration!` +- `upsDeviceById(id: String!): UPSDevice` +- `upsDevices(): [UPSDevice!]!` +- `validateOidcSession(token: String!): OidcSessionValidation!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Validate an OIDC session token (internal use for CLI validation) +- `vars(): Vars!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **VARS** +- `vms(): Vms!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **VMS** #### Description: Get information about all VMs on the system + +### Mutations +Total fields: **22** + +- `addPlugin(input: PluginManagementInput!): Boolean!` + - #### Required 
Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** #### Description: Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. +- `apiKey(): ApiKeyMutations!` +- `archiveAll(importance: NotificationImportance): NotificationOverview!` +- `archiveNotification(id: PrefixedID!): Notification!` + - Marks a notification as archived. +- `archiveNotifications(ids: [PrefixedID!]!): NotificationOverview!` +- `array(): ArrayMutations!` +- `configureUps(config: UPSConfigInput!): Boolean!` +- `createNotification(input: NotificationData!): Notification!` + - Creates a new notification record +- `customization(): CustomizationMutations!` +- `deleteArchivedNotifications(): NotificationOverview!` + - Deletes all archived notifications on server. +- `deleteNotification(id: PrefixedID!, type: NotificationType!): NotificationOverview!` +- `docker(): DockerMutations!` +- `initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus!` + - Initiates a flash drive backup using a configured remote. +- `parityCheck(): ParityCheckMutations!` +- `rclone(): RCloneMutations!` +- `recalculateOverview(): NotificationOverview!` + - Reads each notification to recompute & update the overview. +- `removePlugin(input: PluginManagementInput!): Boolean!` + - #### Required Permissions: - Action: **DELETE_ANY** - Resource: **CONFIG** #### Description: Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. +- `unarchiveAll(importance: NotificationImportance): NotificationOverview!` +- `unarchiveNotifications(ids: [PrefixedID!]!): NotificationOverview!` +- `unreadNotification(id: PrefixedID!): Notification!` + - Marks a notification as unread. 
+- `updateSettings(input: JSON!): UpdateSettingsResponse!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** +- `vm(): VmMutations!` + +### Subscriptions +Total fields: **11** + +- `arraySubscription(): UnraidArray!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `logFile(path: String!): LogFileContent!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** +- `notificationAdded(): Notification!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** +- `notificationsOverview(): NotificationOverview!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** +- `ownerSubscription(): Owner!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** +- `parityHistorySubscription(): ParityCheck!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `serversSubscription(): Server!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `systemMetricsCpu(): CpuUtilization!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `systemMetricsCpuTelemetry(): CpuPackages!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `systemMetricsMemory(): MemoryUtilization!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `upsUpdates(): UPSDevice!` + +## Directives +### `@deprecated` +Marks an element of a GraphQL schema as no longer supported. + +- Locations: `FIELD_DEFINITION`, `ARGUMENT_DEFINITION`, `INPUT_FIELD_DEFINITION`, `ENUM_VALUE` +- Arguments: + - `reason`: `String` (default: `"No longer supported"`) + - Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/). 
+ +### `@include` +Directs the executor to include this field or fragment only when the `if` argument is true. + +- Locations: `FIELD`, `FRAGMENT_SPREAD`, `INLINE_FRAGMENT` +- Arguments: + - `if`: `Boolean!` + - Included when true. + +### `@oneOf` +Indicates exactly one field must be supplied and this field must not be `null`. + +- Locations: `INPUT_OBJECT` + +### `@skip` +Directs the executor to skip this field or fragment when the `if` argument is true. + +- Locations: `FIELD`, `FRAGMENT_SPREAD`, `INLINE_FRAGMENT` +- Arguments: + - `if`: `Boolean!` + - Skipped when true. + +### `@specifiedBy` +Exposes a URL that specifies the behavior of this scalar. + +- Locations: `SCALAR` +- Arguments: + - `url`: `String!` + - The URL that specifies the behavior of this scalar. + +### `@usePermissions` +Directive to document required permissions for fields + +- Locations: `FIELD_DEFINITION` +- Arguments: + - `action`: `String` + - The action required for access (must be a valid AuthAction enum value) + - `resource`: `String` + - The resource required for access (must be a valid Resource enum value) + +## All Types (Alphabetical) +### `ActivationCode` (OBJECT) +- Fields (11): +- `background`: `String` +- `code`: `String` +- `comment`: `String` +- `header`: `String` +- `headermetacolor`: `String` +- `partnerName`: `String` +- `partnerUrl`: `String` +- `serverName`: `String` +- `showBannerGradient`: `Boolean` +- `sysModel`: `String` +- `theme`: `String` + +### `AddPermissionInput` (INPUT_OBJECT) +- Input fields (2): +- `actions`: `[AuthAction!]!` +- `resource`: `Resource!` + +### `AddRoleForApiKeyInput` (INPUT_OBJECT) +- Input fields (2): +- `apiKeyId`: `PrefixedID!` +- `role`: `Role!` + +### `ApiConfig` (OBJECT) +- Fields (5): +- `extraOrigins`: `[String!]!` +- `plugins`: `[String!]!` +- `sandbox`: `Boolean` +- `ssoSubIds`: `[String!]!` +- `version`: `String!` + +### `ApiKey` (OBJECT) +- Implements: `Node` +- Fields (7): +- `createdAt`: `String!` +- `description`: `String` +- 
`id`: `PrefixedID!` +- `key`: `String!` +- `name`: `String!` +- `permissions`: `[Permission!]!` +- `roles`: `[Role!]!` + +### `ApiKeyFormSettings` (OBJECT) +- Implements: `FormSchema`, `Node` +- Fields (4): +- `dataSchema`: `JSON!` + - The data schema for the API key form +- `id`: `PrefixedID!` +- `uiSchema`: `JSON!` + - The UI schema for the API key form +- `values`: `JSON!` + - The current values of the API key form + +### `ApiKeyMutations` (OBJECT) +API Key related mutations + +- Fields (5): +- `addRole`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **API_KEY** #### Description: Add a role to an API key + - Arguments: + - `input`: `AddRoleForApiKeyInput!` +- `create`: `ApiKey!` + - #### Required Permissions: - Action: **CREATE_ANY** - Resource: **API_KEY** #### Description: Create an API key + - Arguments: + - `input`: `CreateApiKeyInput!` +- `delete`: `Boolean!` + - #### Required Permissions: - Action: **DELETE_ANY** - Resource: **API_KEY** #### Description: Delete one or more API keys + - Arguments: + - `input`: `DeleteApiKeyInput!` +- `removeRole`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **API_KEY** #### Description: Remove a role from an API key + - Arguments: + - `input`: `RemoveRoleFromApiKeyInput!` +- `update`: `ApiKey!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **API_KEY** #### Description: Update an API key + - Arguments: + - `input`: `UpdateApiKeyInput!` + +### `ArrayCapacity` (OBJECT) +- Fields (2): +- `disks`: `Capacity!` + - Capacity in number of disks +- `kilobytes`: `Capacity!` + - Capacity in kilobytes + +### `ArrayDisk` (OBJECT) +- Implements: `Node` +- Fields (24): +- `color`: `ArrayDiskFsColor` +- `comment`: `String` + - User comment on disk +- `critical`: `Int` + - (%) Disk space left for critical +- `device`: `String` +- `exportable`: `Boolean` +- `format`: `String` + - File format (ex MBR: 4KiB-aligned) +- `fsFree`: `BigInt` + - (KB) Free Size 
on the FS (Not present on Parity type drive) +- `fsSize`: `BigInt` + - (KB) Total Size of the FS (Not present on Parity type drive) +- `fsType`: `String` + - File system type for the disk +- `fsUsed`: `BigInt` + - (KB) Used Size on the FS (Not present on Parity type drive) +- `id`: `PrefixedID!` +- `idx`: `Int!` + - Array slot number. Parity1 is always 0 and Parity2 is always 29. Array slots will be 1 - 28. Cache slots are 30 - 53. Flash is 54. +- `isSpinning`: `Boolean` + - Whether the disk is currently spinning +- `name`: `String` +- `numErrors`: `BigInt` + - Number of unrecoverable errors reported by the device I/O drivers. Missing data due to unrecoverable array read errors is filled in on-the-fly using parity reconstruct (and we attempt to write this data back to the sector(s) which failed). Any unrecoverable write error results in disabling the disk. +- `numReads`: `BigInt` + - Count of I/O read requests sent to the device I/O drivers. These statistics may be cleared at any time. +- `numWrites`: `BigInt` + - Count of I/O writes requests sent to the device I/O drivers. These statistics may be cleared at any time. +- `rotational`: `Boolean` + - Is the disk a HDD or SSD. 
+- `size`: `BigInt` + - (KB) Disk Size total +- `status`: `ArrayDiskStatus` +- `temp`: `Int` + - Disk temp - will be NaN if array is not started or DISK_NP +- `transport`: `String` + - ata | nvme | usb | (others) +- `type`: `ArrayDiskType!` + - Type of Disk - used to differentiate Cache / Flash / Array / Parity +- `warning`: `Int` + - (%) Disk space left to warn + +### `ArrayDiskFsColor` (ENUM) +- Enum values (9): + - `BLUE_BLINK` + - `BLUE_ON` + - `GREEN_BLINK` + - `GREEN_ON` + - `GREY_OFF` + - `RED_OFF` + - `RED_ON` + - `YELLOW_BLINK` + - `YELLOW_ON` + +### `ArrayDiskInput` (INPUT_OBJECT) +- Input fields (2): +- `id`: `PrefixedID!` + - Disk ID +- `slot`: `Int` + - The slot for the disk + +### `ArrayDiskStatus` (ENUM) +- Enum values (9): + - `DISK_DSBL` + - `DISK_DSBL_NEW` + - `DISK_INVALID` + - `DISK_NEW` + - `DISK_NP` + - `DISK_NP_DSBL` + - `DISK_NP_MISSING` + - `DISK_OK` + - `DISK_WRONG` + +### `ArrayDiskType` (ENUM) +- Enum values (4): + - `CACHE` + - `DATA` + - `FLASH` + - `PARITY` + +### `ArrayMutations` (OBJECT) +- Fields (6): +- `addDiskToArray`: `UnraidArray!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Add new disk to array + - Arguments: + - `input`: `ArrayDiskInput!` +- `clearArrayDiskStatistics`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Clear statistics for a disk in the array + - Arguments: + - `id`: `PrefixedID!` +- `mountArrayDisk`: `ArrayDisk!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Mount a disk in the array + - Arguments: + - `id`: `PrefixedID!` +- `removeDiskFromArray`: `UnraidArray!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Remove existing disk from array. NOTE: The array must be stopped before running this otherwise it'll throw an error. 
+ - Arguments: + - `input`: `ArrayDiskInput!` +- `setState`: `UnraidArray!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Set array state + - Arguments: + - `input`: `ArrayStateInput!` +- `unmountArrayDisk`: `ArrayDisk!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Unmount a disk from the array + - Arguments: + - `id`: `PrefixedID!` + +### `ArrayState` (ENUM) +- Enum values (11): + - `DISABLE_DISK` + - `INVALID_EXPANSION` + - `NEW_ARRAY` + - `NEW_DISK_TOO_SMALL` + - `NO_DATA_DISKS` + - `PARITY_NOT_BIGGEST` + - `RECON_DISK` + - `STARTED` + - `STOPPED` + - `SWAP_DSBL` + - `TOO_MANY_MISSING_DISKS` + +### `ArrayStateInput` (INPUT_OBJECT) +- Input fields (1): +- `desiredState`: `ArrayStateInputState!` + - Array state + +### `ArrayStateInputState` (ENUM) +- Enum values (2): + - `START` + - `STOP` + +### `AuthAction` (ENUM) +Authentication actions with possession (e.g., create:any, read:own) + +- Enum values (8): + - `CREATE_ANY` + - Create any resource + - `CREATE_OWN` + - Create own resource + - `DELETE_ANY` + - Delete any resource + - `DELETE_OWN` + - Delete own resource + - `READ_ANY` + - Read any resource + - `READ_OWN` + - Read own resource + - `UPDATE_ANY` + - Update any resource + - `UPDATE_OWN` + - Update own resource + +### `AuthorizationOperator` (ENUM) +Operators for authorization rule matching + +- Enum values (4): + - `CONTAINS` + - `ENDS_WITH` + - `EQUALS` + - `STARTS_WITH` + +### `AuthorizationRuleMode` (ENUM) +Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass) + +- Enum values (2): + - `AND` + - `OR` + +### `BigInt` (SCALAR) +The `BigInt` scalar type represents non-fractional signed whole numeric values. + +- Scalar type + +### `Boolean` (SCALAR) +The `Boolean` scalar type represents `true` or `false`. 
+ +- Scalar type + +### `Capacity` (OBJECT) +- Fields (3): +- `free`: `String!` + - Free capacity +- `total`: `String!` + - Total capacity +- `used`: `String!` + - Used capacity + +### `Config` (OBJECT) +- Implements: `Node` +- Fields (3): +- `error`: `String` +- `id`: `PrefixedID!` +- `valid`: `Boolean` + +### `ConfigErrorState` (ENUM) +Possible error states for configuration + +- Enum values (5): + - `INELIGIBLE` + - `INVALID` + - `NO_KEY_SERVER` + - `UNKNOWN_ERROR` + - `WITHDRAWN` + +### `ContainerHostConfig` (OBJECT) +- Fields (1): +- `networkMode`: `String!` + +### `ContainerPort` (OBJECT) +- Fields (4): +- `ip`: `String` +- `privatePort`: `Port` +- `publicPort`: `Port` +- `type`: `ContainerPortType!` + +### `ContainerPortType` (ENUM) +- Enum values (2): + - `TCP` + - `UDP` + +### `ContainerState` (ENUM) +- Enum values (2): + - `EXITED` + - `RUNNING` + +### `CoreVersions` (OBJECT) +- Fields (3): +- `api`: `String` + - Unraid API version +- `kernel`: `String` + - Kernel version +- `unraid`: `String` + - Unraid version + +### `CpuLoad` (OBJECT) +CPU load for a single core + +- Fields (8): +- `percentGuest`: `Float!` + - The percentage of time the CPU spent running virtual machines (guest). +- `percentIdle`: `Float!` + - The percentage of time the CPU was idle. +- `percentIrq`: `Float!` + - The percentage of time the CPU spent servicing hardware interrupts. +- `percentNice`: `Float!` + - The percentage of time the CPU spent on low-priority (niced) user space processes. +- `percentSteal`: `Float!` + - The percentage of CPU time stolen by the hypervisor. +- `percentSystem`: `Float!` + - The percentage of time the CPU spent in kernel space. +- `percentTotal`: `Float!` + - The total CPU load on a single core, in percent. +- `percentUser`: `Float!` + - The percentage of time the CPU spent in user space. 
+ +### `CpuPackages` (OBJECT) +- Implements: `Node` +- Fields (4): +- `id`: `PrefixedID!` +- `power`: `[Float!]!` + - Power draw per package (W) +- `temp`: `[Float!]!` + - Temperature per package (°C) +- `totalPower`: `Float!` + - Total CPU package power draw (W) + +### `CpuUtilization` (OBJECT) +- Implements: `Node` +- Fields (3): +- `cpus`: `[CpuLoad!]!` + - CPU load for each core +- `id`: `PrefixedID!` +- `percentTotal`: `Float!` + - Total CPU load in percent + +### `CreateApiKeyInput` (INPUT_OBJECT) +- Input fields (5): +- `description`: `String` +- `name`: `String!` +- `overwrite`: `Boolean` + - This will replace the existing key if one already exists with the same name, otherwise returns the existing key +- `permissions`: `[AddPermissionInput!]` +- `roles`: `[Role!]` + +### `CreateRCloneRemoteInput` (INPUT_OBJECT) +- Input fields (3): +- `name`: `String!` +- `parameters`: `JSON!` +- `type`: `String!` + +### `Customization` (OBJECT) +- Fields (3): +- `activationCode`: `ActivationCode` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ACTIVATION_CODE** +- `partnerInfo`: `PublicPartnerInfo` +- `theme`: `Theme!` + +### `CustomizationMutations` (OBJECT) +Customization related mutations + +- Fields (1): +- `setTheme`: `Theme!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CUSTOMIZATIONS** #### Description: Update the UI theme (writes dynamix.cfg) + - Arguments: + - `theme`: `ThemeName!` + - Theme to apply + +### `DateTime` (SCALAR) +A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format. + +- Scalar type + +### `DeleteApiKeyInput` (INPUT_OBJECT) +- Input fields (1): +- `ids`: `[PrefixedID!]!` + +### `DeleteRCloneRemoteInput` (INPUT_OBJECT) +- Input fields (1): +- `name`: `String!` + +### `Disk` (OBJECT) +- Implements: `Node` +- Fields (20): +- `bytesPerSector`: `Float!` + - The number of bytes per sector +- `device`: `String!` + - The device path of the disk (e.g. 
/dev/sdb) +- `firmwareRevision`: `String!` + - The firmware revision of the disk +- `id`: `PrefixedID!` +- `interfaceType`: `DiskInterfaceType!` + - The interface type of the disk +- `isSpinning`: `Boolean!` + - Whether the disk is spinning or not +- `name`: `String!` + - The model name of the disk +- `partitions`: `[DiskPartition!]!` + - The partitions on the disk +- `sectorsPerTrack`: `Float!` + - The number of sectors per track +- `serialNum`: `String!` + - The serial number of the disk +- `size`: `Float!` + - The total size of the disk in bytes +- `smartStatus`: `DiskSmartStatus!` + - The SMART status of the disk +- `temperature`: `Float` + - The current temperature of the disk in Celsius +- `totalCylinders`: `Float!` + - The total number of cylinders on the disk +- `totalHeads`: `Float!` + - The total number of heads on the disk +- `totalSectors`: `Float!` + - The total number of sectors on the disk +- `totalTracks`: `Float!` + - The total number of tracks on the disk +- `tracksPerCylinder`: `Float!` + - The number of tracks per cylinder +- `type`: `String!` + - The type of disk (e.g. 
SSD, HDD) +- `vendor`: `String!` + - The manufacturer of the disk + +### `DiskFsType` (ENUM) +The type of filesystem on the disk partition + +- Enum values (6): + - `BTRFS` + - `EXT4` + - `NTFS` + - `VFAT` + - `XFS` + - `ZFS` + +### `DiskInterfaceType` (ENUM) +The type of interface the disk uses to connect to the system + +- Enum values (5): + - `PCIE` + - `SAS` + - `SATA` + - `UNKNOWN` + - `USB` + +### `DiskPartition` (OBJECT) +- Fields (3): +- `fsType`: `DiskFsType!` + - The filesystem type of the partition +- `name`: `String!` + - The name of the partition +- `size`: `Float!` + - The size of the partition in bytes + +### `DiskSmartStatus` (ENUM) +The SMART (Self-Monitoring, Analysis and Reporting Technology) status of the disk + +- Enum values (2): + - `OK` + - `UNKNOWN` + +### `Docker` (OBJECT) +- Implements: `Node` +- Fields (3): +- `containers`: `[DockerContainer!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DOCKER** + - Arguments: + - `skipCache`: `Boolean!` (default: `false`) +- `id`: `PrefixedID!` +- `networks`: `[DockerNetwork!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DOCKER** + - Arguments: + - `skipCache`: `Boolean!` (default: `false`) + +### `DockerContainer` (OBJECT) +- Implements: `Node` +- Fields (15): +- `autoStart`: `Boolean!` +- `command`: `String!` +- `created`: `Int!` +- `hostConfig`: `ContainerHostConfig` +- `id`: `PrefixedID!` +- `image`: `String!` +- `imageId`: `String!` +- `labels`: `JSON` +- `mounts`: `[JSON!]` +- `names`: `[String!]!` +- `networkSettings`: `JSON` +- `ports`: `[ContainerPort!]!` +- `sizeRootFs`: `BigInt` + - Total size of all files in the container (in bytes) +- `state`: `ContainerState!` +- `status`: `String!` + +### `DockerMutations` (OBJECT) +- Fields (2): +- `start`: `DockerContainer!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **DOCKER** #### Description: Start a container + - Arguments: + - `id`: `PrefixedID!` +- `stop`: 
`DockerContainer!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **DOCKER** #### Description: Stop a container + - Arguments: + - `id`: `PrefixedID!` + +### `DockerNetwork` (OBJECT) +- Implements: `Node` +- Fields (15): +- `attachable`: `Boolean!` +- `configFrom`: `JSON!` +- `configOnly`: `Boolean!` +- `containers`: `JSON!` +- `created`: `String!` +- `driver`: `String!` +- `enableIPv6`: `Boolean!` +- `id`: `PrefixedID!` +- `ingress`: `Boolean!` +- `internal`: `Boolean!` +- `ipam`: `JSON!` +- `labels`: `JSON!` +- `name`: `String!` +- `options`: `JSON!` +- `scope`: `String!` + +### `ExplicitStatusItem` (OBJECT) +- Fields (2): +- `name`: `String!` +- `updateStatus`: `UpdateStatus!` + +### `Flash` (OBJECT) +- Implements: `Node` +- Fields (4): +- `guid`: `String!` +- `id`: `PrefixedID!` +- `product`: `String!` +- `vendor`: `String!` + +### `FlashBackupStatus` (OBJECT) +- Fields (2): +- `jobId`: `String` + - Job ID if available, can be used to check job status. +- `status`: `String!` + - Status message indicating the outcome of the backup initiation. + +### `Float` (SCALAR) +The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](https://en.wikipedia.org/wiki/IEEE_floating_point). + +- Scalar type + +### `FormSchema` (INTERFACE) +- Interface fields (3): +- `dataSchema`: `JSON!` + - The data schema for the form +- `uiSchema`: `JSON!` + - The UI schema for the form +- `values`: `JSON!` + - The current values of the form +- Implemented by (2): `ApiKeyFormSettings`, `UnifiedSettings` + +### `ID` (SCALAR) +The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as `"4"`) or integer (such as `4`) input value will be accepted as an ID. 
+ +- Scalar type + +### `Info` (OBJECT) +- Implements: `Node` +- Fields (11): +- `baseboard`: `InfoBaseboard!` + - Motherboard information +- `cpu`: `InfoCpu!` + - CPU information +- `devices`: `InfoDevices!` + - Device information +- `display`: `InfoDisplay!` + - Display configuration +- `id`: `PrefixedID!` +- `machineId`: `ID` + - Machine ID +- `memory`: `InfoMemory!` + - Memory information +- `os`: `InfoOs!` + - Operating system information +- `system`: `InfoSystem!` + - System information +- `time`: `DateTime!` + - Current server time +- `versions`: `InfoVersions!` + - Software versions + +### `InfoBaseboard` (OBJECT) +- Implements: `Node` +- Fields (8): +- `assetTag`: `String` + - Motherboard asset tag +- `id`: `PrefixedID!` +- `manufacturer`: `String` + - Motherboard manufacturer +- `memMax`: `Float` + - Maximum memory capacity in bytes +- `memSlots`: `Float` + - Number of memory slots +- `model`: `String` + - Motherboard model +- `serial`: `String` + - Motherboard serial number +- `version`: `String` + - Motherboard version + +### `InfoCpu` (OBJECT) +- Implements: `Node` +- Fields (20): +- `brand`: `String` + - CPU brand name +- `cache`: `JSON` + - CPU cache information +- `cores`: `Int` + - Number of CPU cores +- `family`: `String` + - CPU family +- `flags`: `[String!]` + - CPU feature flags +- `id`: `PrefixedID!` +- `manufacturer`: `String` + - CPU manufacturer +- `model`: `String` + - CPU model +- `packages`: `CpuPackages!` +- `processors`: `Int` + - Number of physical processors +- `revision`: `String` + - CPU revision +- `socket`: `String` + - CPU socket type +- `speed`: `Float` + - Current CPU speed in GHz +- `speedmax`: `Float` + - Maximum CPU speed in GHz +- `speedmin`: `Float` + - Minimum CPU speed in GHz +- `stepping`: `Int` + - CPU stepping +- `threads`: `Int` + - Number of CPU threads +- `topology`: `[[[Int!]!]!]!` + - Per-package array of core/thread pairs, e.g. 
[[[0,1],[2,3]], [[4,5],[6,7]]] +- `vendor`: `String` + - CPU vendor +- `voltage`: `String` + - CPU voltage + +### `InfoDevices` (OBJECT) +- Implements: `Node` +- Fields (5): +- `gpu`: `[InfoGpu!]` + - List of GPU devices +- `id`: `PrefixedID!` +- `network`: `[InfoNetwork!]` + - List of network interfaces +- `pci`: `[InfoPci!]` + - List of PCI devices +- `usb`: `[InfoUsb!]` + - List of USB devices + +### `InfoDisplay` (OBJECT) +- Implements: `Node` +- Fields (16): +- `case`: `InfoDisplayCase!` + - Case display configuration +- `critical`: `Int!` + - Critical temperature threshold +- `hot`: `Int!` + - Hot temperature threshold +- `id`: `PrefixedID!` +- `locale`: `String` + - Locale setting +- `max`: `Int` + - Maximum temperature threshold +- `resize`: `Boolean!` + - Enable UI resize +- `scale`: `Boolean!` + - Enable UI scaling +- `tabs`: `Boolean!` + - Show tabs in UI +- `text`: `Boolean!` + - Show text labels +- `theme`: `ThemeName!` + - UI theme name +- `total`: `Boolean!` + - Show totals +- `unit`: `Temperature!` + - Temperature unit (C or F) +- `usage`: `Boolean!` + - Show usage statistics +- `warning`: `Int!` + - Warning temperature threshold +- `wwn`: `Boolean!` + - Show WWN identifiers + +### `InfoDisplayCase` (OBJECT) +- Implements: `Node` +- Fields (5): +- `base64`: `String!` + - Base64 encoded case image +- `error`: `String!` + - Error message if any +- `icon`: `String!` + - Case icon identifier +- `id`: `PrefixedID!` +- `url`: `String!` + - Case image URL + +### `InfoGpu` (OBJECT) +- Implements: `Node` +- Fields (7): +- `blacklisted`: `Boolean!` + - Whether GPU is blacklisted +- `class`: `String!` + - Device class +- `id`: `PrefixedID!` +- `productid`: `String!` + - Product ID +- `type`: `String!` + - GPU type/manufacturer +- `typeid`: `String!` + - GPU type identifier +- `vendorname`: `String` + - Vendor name + +### `InfoMemory` (OBJECT) +- Implements: `Node` +- Fields (2): +- `id`: `PrefixedID!` +- `layout`: `[MemoryLayout!]!` + - Physical memory layout 
+ +### `InfoNetwork` (OBJECT) +- Implements: `Node` +- Fields (8): +- `dhcp`: `Boolean` + - DHCP enabled flag +- `id`: `PrefixedID!` +- `iface`: `String!` + - Network interface name +- `mac`: `String` + - MAC address +- `model`: `String` + - Network interface model +- `speed`: `String` + - Network speed +- `vendor`: `String` + - Network vendor +- `virtual`: `Boolean` + - Virtual interface flag + +### `InfoOs` (OBJECT) +- Implements: `Node` +- Fields (15): +- `arch`: `String` + - OS architecture +- `build`: `String` + - OS build identifier +- `codename`: `String` + - OS codename +- `distro`: `String` + - Linux distribution name +- `fqdn`: `String` + - Fully qualified domain name +- `hostname`: `String` + - Hostname +- `id`: `PrefixedID!` +- `kernel`: `String` + - Kernel version +- `logofile`: `String` + - OS logo name +- `platform`: `String` + - Operating system platform +- `release`: `String` + - OS release version +- `serial`: `String` + - OS serial number +- `servicepack`: `String` + - Service pack version +- `uefi`: `Boolean` + - OS started via UEFI +- `uptime`: `String` + - Boot time ISO string + +### `InfoPci` (OBJECT) +- Implements: `Node` +- Fields (9): +- `blacklisted`: `String!` + - Blacklisted status +- `class`: `String!` + - Device class +- `id`: `PrefixedID!` +- `productid`: `String!` + - Product ID +- `productname`: `String` + - Product name +- `type`: `String!` + - Device type/manufacturer +- `typeid`: `String!` + - Type identifier +- `vendorid`: `String!` + - Vendor ID +- `vendorname`: `String` + - Vendor name + +### `InfoSystem` (OBJECT) +- Implements: `Node` +- Fields (8): +- `id`: `PrefixedID!` +- `manufacturer`: `String` + - System manufacturer +- `model`: `String` + - System model +- `serial`: `String` + - System serial number +- `sku`: `String` + - System SKU +- `uuid`: `String` + - System UUID +- `version`: `String` + - System version +- `virtual`: `Boolean` + - Virtual machine flag + +### `InfoUsb` (OBJECT) +- Implements: `Node` +- Fields 
(4): +- `bus`: `String` + - USB bus number +- `device`: `String` + - USB device number +- `id`: `PrefixedID!` +- `name`: `String!` + - USB device name + +### `InfoVersions` (OBJECT) +- Implements: `Node` +- Fields (3): +- `core`: `CoreVersions!` + - Core system versions +- `id`: `PrefixedID!` +- `packages`: `PackageVersions` + - Software package versions + +### `InitiateFlashBackupInput` (INPUT_OBJECT) +- Input fields (4): +- `destinationPath`: `String!` + - Destination path on the remote. +- `options`: `JSON` + - Additional options for the backup operation, such as --dry-run or --transfers. +- `remoteName`: `String!` + - The name of the remote configuration to use for the backup. +- `sourcePath`: `String!` + - Source path to backup (typically the flash drive). + +### `Int` (SCALAR) +The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1. + +- Scalar type + +### `JSON` (SCALAR) +The `JSON` scalar type represents JSON values as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf). 
+ +- Scalar type + +### `KeyFile` (OBJECT) +- Fields (2): +- `contents`: `String` +- `location`: `String` + +### `LogFile` (OBJECT) +- Fields (4): +- `modifiedAt`: `DateTime!` + - Last modified timestamp +- `name`: `String!` + - Name of the log file +- `path`: `String!` + - Full path to the log file +- `size`: `Int!` + - Size of the log file in bytes + +### `LogFileContent` (OBJECT) +- Fields (4): +- `content`: `String!` + - Content of the log file +- `path`: `String!` + - Path to the log file +- `startLine`: `Int` + - Starting line number of the content (1-indexed) +- `totalLines`: `Int!` + - Total number of lines in the file + +### `MemoryLayout` (OBJECT) +- Implements: `Node` +- Fields (12): +- `bank`: `String` + - Memory bank location (e.g., BANK 0) +- `clockSpeed`: `Int` + - Memory clock speed in MHz +- `formFactor`: `String` + - Form factor (e.g., DIMM, SODIMM) +- `id`: `PrefixedID!` +- `manufacturer`: `String` + - Memory manufacturer +- `partNum`: `String` + - Part number of the memory module +- `serialNum`: `String` + - Serial number of the memory module +- `size`: `BigInt!` + - Memory module size in bytes +- `type`: `String` + - Memory type (e.g., DDR4, DDR5) +- `voltageConfigured`: `Int` + - Configured voltage in millivolts +- `voltageMax`: `Int` + - Maximum voltage in millivolts +- `voltageMin`: `Int` + - Minimum voltage in millivolts + +### `MemoryUtilization` (OBJECT) +- Implements: `Node` +- Fields (12): +- `active`: `BigInt!` + - Active memory in bytes +- `available`: `BigInt!` + - Available memory in bytes +- `buffcache`: `BigInt!` + - Buffer/cache memory in bytes +- `free`: `BigInt!` + - Free memory in bytes +- `id`: `PrefixedID!` +- `percentSwapTotal`: `Float!` + - Swap usage percentage +- `percentTotal`: `Float!` + - Memory usage percentage +- `swapFree`: `BigInt!` + - Free swap memory in bytes +- `swapTotal`: `BigInt!` + - Total swap memory in bytes +- `swapUsed`: `BigInt!` + - Used swap memory in bytes +- `total`: `BigInt!` + - Total system 
memory in bytes +- `used`: `BigInt!` + - Used memory in bytes + +### `Metrics` (OBJECT) +System metrics including CPU and memory utilization + +- Implements: `Node` +- Fields (3): +- `cpu`: `CpuUtilization` + - Current CPU utilization metrics +- `id`: `PrefixedID!` +- `memory`: `MemoryUtilization` + - Current memory utilization metrics + +### `Mutation` (OBJECT) +- Fields (22): +- `addPlugin`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** #### Description: Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. + - Arguments: + - `input`: `PluginManagementInput!` +- `apiKey`: `ApiKeyMutations!` +- `archiveAll`: `NotificationOverview!` + - Arguments: + - `importance`: `NotificationImportance` +- `archiveNotification`: `Notification!` + - Marks a notification as archived. + - Arguments: + - `id`: `PrefixedID!` +- `archiveNotifications`: `NotificationOverview!` + - Arguments: + - `ids`: `[PrefixedID!]!` +- `array`: `ArrayMutations!` +- `configureUps`: `Boolean!` + - Arguments: + - `config`: `UPSConfigInput!` +- `createNotification`: `Notification!` + - Creates a new notification record + - Arguments: + - `input`: `NotificationData!` +- `customization`: `CustomizationMutations!` +- `deleteArchivedNotifications`: `NotificationOverview!` + - Deletes all archived notifications on server. +- `deleteNotification`: `NotificationOverview!` + - Arguments: + - `id`: `PrefixedID!` + - `type`: `NotificationType!` +- `docker`: `DockerMutations!` +- `initiateFlashBackup`: `FlashBackupStatus!` + - Initiates a flash drive backup using a configured remote. + - Arguments: + - `input`: `InitiateFlashBackupInput!` +- `parityCheck`: `ParityCheckMutations!` +- `rclone`: `RCloneMutations!` +- `recalculateOverview`: `NotificationOverview!` + - Reads each notification to recompute & update the overview. 
+- `removePlugin`: `Boolean!` + - #### Required Permissions: - Action: **DELETE_ANY** - Resource: **CONFIG** #### Description: Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. + - Arguments: + - `input`: `PluginManagementInput!` +- `unarchiveAll`: `NotificationOverview!` + - Arguments: + - `importance`: `NotificationImportance` +- `unarchiveNotifications`: `NotificationOverview!` + - Arguments: + - `ids`: `[PrefixedID!]!` +- `unreadNotification`: `Notification!` + - Marks a notification as unread. + - Arguments: + - `id`: `PrefixedID!` +- `updateSettings`: `UpdateSettingsResponse!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** + - Arguments: + - `input`: `JSON!` +- `vm`: `VmMutations!` + +### `Node` (INTERFACE) +- Interface fields (1): +- `id`: `PrefixedID!` +- Implemented by (43): `ApiKey`, `ApiKeyFormSettings`, `ArrayDisk`, `Config`, `CpuPackages`, `CpuUtilization`, `Disk`, `Docker`, `DockerContainer`, `DockerNetwork`, `Flash`, `Info`, `InfoBaseboard`, `InfoCpu`, `InfoDevices`, `InfoDisplay`, `InfoDisplayCase`, `InfoGpu`, `InfoMemory`, `InfoNetwork`, `InfoOs`, `InfoPci`, `InfoSystem`, `InfoUsb`, `InfoVersions`, `MemoryLayout`, `MemoryUtilization`, `Metrics`, `Notification`, `Notifications`, `ProfileModel`, `Registration`, `Server`, `Service`, `Settings`, `Share`, `SsoSettings`, `UnifiedSettings`, `UnraidArray`, `UserAccount`, `Vars`, `VmDomain`, `Vms` + +### `Notification` (OBJECT) +- Implements: `Node` +- Fields (9): +- `description`: `String!` +- `formattedTimestamp`: `String` +- `id`: `PrefixedID!` +- `importance`: `NotificationImportance!` +- `link`: `String` +- `subject`: `String!` +- `timestamp`: `String` + - ISO Timestamp for when the notification occurred +- `title`: `String!` + - Also known as 'event' +- `type`: `NotificationType!` + +### `NotificationCounts` (OBJECT) +- Fields (4): +- `alert`: `Int!` +- `info`: `Int!` +- `total`: `Int!` +- 
`warning`: `Int!` + +### `NotificationData` (INPUT_OBJECT) +- Input fields (5): +- `description`: `String!` +- `importance`: `NotificationImportance!` +- `link`: `String` +- `subject`: `String!` +- `title`: `String!` + +### `NotificationFilter` (INPUT_OBJECT) +- Input fields (4): +- `importance`: `NotificationImportance` +- `limit`: `Int!` +- `offset`: `Int!` +- `type`: `NotificationType!` + +### `NotificationImportance` (ENUM) +- Enum values (3): + - `ALERT` + - `INFO` + - `WARNING` + +### `NotificationOverview` (OBJECT) +- Fields (2): +- `archive`: `NotificationCounts!` +- `unread`: `NotificationCounts!` + +### `NotificationType` (ENUM) +- Enum values (2): + - `ARCHIVE` + - `UNREAD` + +### `Notifications` (OBJECT) +- Implements: `Node` +- Fields (3): +- `id`: `PrefixedID!` +- `list`: `[Notification!]!` + - Arguments: + - `filter`: `NotificationFilter!` +- `overview`: `NotificationOverview!` + - A cached overview of the notifications in the system & their severity. + +### `OidcAuthorizationRule` (OBJECT) +- Fields (3): +- `claim`: `String!` + - The claim to check (e.g., email, sub, groups, hd) +- `operator`: `AuthorizationOperator!` + - The comparison operator +- `value`: `[String!]!` + - The value(s) to match against + +### `OidcConfiguration` (OBJECT) +- Fields (2): +- `defaultAllowedOrigins`: `[String!]` + - Default allowed redirect origins that apply to all OIDC providers (e.g., Tailscale domains) +- `providers`: `[OidcProvider!]!` + - List of configured OIDC providers + +### `OidcProvider` (OBJECT) +- Fields (15): +- `authorizationEndpoint`: `String` + - OAuth2 authorization endpoint URL. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration +- `authorizationRuleMode`: `AuthorizationRuleMode` + - Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass). Defaults to OR. 
+- `authorizationRules`: `[OidcAuthorizationRule!]` + - Flexible authorization rules based on claims +- `buttonIcon`: `String` + - URL or base64 encoded icon for the login button +- `buttonStyle`: `String` + - Custom CSS styles for the button (e.g., "background: linear-gradient(to right, #4f46e5, #7c3aed); border-radius: 9999px;") +- `buttonText`: `String` + - Custom text for the login button +- `buttonVariant`: `String` + - Button variant style from Reka UI. See https://reka-ui.com/docs/components/button +- `clientId`: `String!` + - OAuth2 client ID registered with the provider +- `clientSecret`: `String` + - OAuth2 client secret (if required by provider) +- `id`: `PrefixedID!` + - The unique identifier for the OIDC provider +- `issuer`: `String` + - OIDC issuer URL (e.g., https://accounts.google.com). Required for auto-discovery via /.well-known/openid-configuration +- `jwksUri`: `String` + - JSON Web Key Set URI for token validation. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration +- `name`: `String!` + - Display name of the OIDC provider +- `scopes`: `[String!]!` + - OAuth2 scopes to request (e.g., openid, profile, email) +- `tokenEndpoint`: `String` + - OAuth2 token endpoint URL. 
If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + +### `OidcSessionValidation` (OBJECT) +- Fields (2): +- `username`: `String` +- `valid`: `Boolean!` + +### `OrganizerContainerResource` (OBJECT) +- Fields (4): +- `id`: `String!` +- `meta`: `DockerContainer` +- `name`: `String!` +- `type`: `String!` + +### `OrganizerResource` (OBJECT) +- Fields (4): +- `id`: `String!` +- `meta`: `JSON` +- `name`: `String!` +- `type`: `String!` + +### `Owner` (OBJECT) +- Fields (3): +- `avatar`: `String!` +- `url`: `String!` +- `username`: `String!` + +### `PackageVersions` (OBJECT) +- Fields (8): +- `docker`: `String` + - Docker version +- `git`: `String` + - Git version +- `nginx`: `String` + - nginx version +- `node`: `String` + - Node.js version +- `npm`: `String` + - npm version +- `openssl`: `String` + - OpenSSL version +- `php`: `String` + - PHP version +- `pm2`: `String` + - pm2 version + +### `ParityCheck` (OBJECT) +- Fields (9): +- `correcting`: `Boolean` + - Whether corrections are being written to parity +- `date`: `DateTime` + - Date of the parity check +- `duration`: `Int` + - Duration of the parity check in seconds +- `errors`: `Int` + - Number of errors during the parity check +- `paused`: `Boolean` + - Whether the parity check is paused +- `progress`: `Int` + - Progress percentage of the parity check +- `running`: `Boolean` + - Whether the parity check is running +- `speed`: `String` + - Speed of the parity check, in MB/s +- `status`: `ParityCheckStatus!` + - Status of the parity check + +### `ParityCheckMutations` (OBJECT) +Parity check related mutations, WIP, response types and functionality will change + +- Fields (4): +- `cancel`: `JSON!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Cancel a parity check +- `pause`: `JSON!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Pause a parity check +- `resume`: `JSON!` + - #### Required 
Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Resume a parity check +- `start`: `JSON!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **ARRAY** #### Description: Start a parity check + - Arguments: + - `correct`: `Boolean!` + +### `ParityCheckStatus` (ENUM) +- Enum values (6): + - `CANCELLED` + - `COMPLETED` + - `FAILED` + - `NEVER_RUN` + - `PAUSED` + - `RUNNING` + +### `Permission` (OBJECT) +- Fields (2): +- `actions`: `[AuthAction!]!` + - Actions allowed on this resource +- `resource`: `Resource!` + +### `Plugin` (OBJECT) +- Fields (4): +- `hasApiModule`: `Boolean` + - Whether the plugin has an API module +- `hasCliModule`: `Boolean` + - Whether the plugin has a CLI module +- `name`: `String!` + - The name of the plugin package +- `version`: `String!` + - The version of the plugin package + +### `PluginManagementInput` (INPUT_OBJECT) +- Input fields (3): +- `bundled`: `Boolean!` + - Whether to treat plugins as bundled plugins. Bundled plugins are installed to node_modules at build time and controlled via config only. + - Default: `false` +- `names`: `[String!]!` + - Array of plugin package names to add or remove +- `restart`: `Boolean!` + - Whether to restart the API after the operation. When false, a restart has already been queued. + - Default: `true` + +### `Port` (SCALAR) +A field whose value is a valid TCP port within the range of 0 to 65535: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_ports + +- Scalar type + +### `PrefixedID` (SCALAR) +### Description: ID scalar type that prefixes the underlying ID with the server identifier on output and strips it on input. We use this scalar type to ensure that the ID is unique across all servers, allowing the same underlying resource ID to be used across different server instances. #### Input Behavior: When providing an ID as input (e.g., in arguments or input objects), the server identifier prefix (':') is optional. 
- If the prefix is present (e.g., '123:456'), it will be automatically stripped, and only the underlying ID ('456') will be used internally. - If the prefix is absent (e.g., '456'), the ID will be used as-is. This makes it flexible for clients, as they don't strictly need to know or provide the server ID. #### Output Behavior: When an ID is returned in the response (output), it will *always* be prefixed with the current server's unique identifier (e.g., '123:456'). #### Example: Note: The server identifier is '123' in this example. ##### Input (Prefix Optional): ```graphql # Both of these are valid inputs resolving to internal ID '456' { someQuery(id: "123:456") { ... } anotherQuery(id: "456") { ... } } ``` ##### Output (Prefix Always Added): ```graphql # Assuming internal ID is '456' { "data": { "someResource": { "id": "123:456" } } } ``` + +- Scalar type + +### `ProfileModel` (OBJECT) +- Implements: `Node` +- Fields (4): +- `avatar`: `String!` +- `id`: `PrefixedID!` +- `url`: `String!` +- `username`: `String!` + +### `PublicOidcProvider` (OBJECT) +- Fields (6): +- `buttonIcon`: `String` +- `buttonStyle`: `String` +- `buttonText`: `String` +- `buttonVariant`: `String` +- `id`: `ID!` +- `name`: `String!` + +### `PublicPartnerInfo` (OBJECT) +- Fields (4): +- `hasPartnerLogo`: `Boolean!` + - Indicates if a partner logo exists +- `partnerLogoUrl`: `String` + - The path to the partner logo image on the flash drive, relative to the activation code file +- `partnerName`: `String` +- `partnerUrl`: `String` + +### `Query` (OBJECT) +- Fields (46): +- `apiKey`: `ApiKey` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** + - Arguments: + - `id`: `PrefixedID!` +- `apiKeyPossiblePermissions`: `[Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: All possible permissions for API keys +- `apiKeyPossibleRoles`: `[Role!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: 
**PERMISSION** #### Description: All possible roles for API keys +- `apiKeys`: `[ApiKey!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** +- `array`: `UnraidArray!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `config`: `Config!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** +- `customization`: `Customization` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CUSTOMIZATIONS** +- `disk`: `Disk!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** + - Arguments: + - `id`: `PrefixedID!` +- `disks`: `[Disk!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** +- `docker`: `Docker!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **DOCKER** +- `flash`: `Flash!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** +- `getApiKeyCreationFormSchema`: `ApiKeyFormSettings!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** #### Description: Get JSON Schema for API key creation form +- `getAvailableAuthActions`: `[AuthAction!]!` + - Get all available authentication actions with possession +- `getPermissionsForRoles`: `[Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Get the actual permissions that would be granted by a set of roles + - Arguments: + - `roles`: `[Role!]!` +- `info`: `Info!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `isInitialSetup`: `Boolean!` +- `isSSOEnabled`: `Boolean!` +- `logFile`: `LogFileContent!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** + - Arguments: + - `lines`: `Int` + - `path`: `String!` + - `startLine`: `Int` +- `logFiles`: `[LogFile!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** +- `me`: `UserAccount!` + - #### Required Permissions: - Action: **READ_ANY** 
- Resource: **ME** +- `metrics`: `Metrics!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `notifications`: `Notifications!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** #### Description: Get all notifications +- `oidcConfiguration`: `OidcConfiguration!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get the full OIDC configuration (admin only) +- `oidcProvider`: `OidcProvider` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get a specific OIDC provider by ID + - Arguments: + - `id`: `PrefixedID!` +- `oidcProviders`: `[OidcProvider!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get all configured OIDC providers (admin only) +- `online`: `Boolean!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ONLINE** +- `owner`: `Owner!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** +- `parityHistory`: `[ParityCheck!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `plugins`: `[Plugin!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: List all installed plugins with their metadata +- `previewEffectivePermissions`: `[Permission!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Preview the effective permissions for a combination of roles and explicit permissions + - Arguments: + - `permissions`: `[AddPermissionInput!]` + - `roles`: `[Role!]` +- `publicOidcProviders`: `[PublicOidcProvider!]!` + - Get public OIDC provider information for login buttons +- `publicPartnerInfo`: `PublicPartnerInfo` +- `publicTheme`: `Theme!` +- `rclone`: `RCloneBackupSettings!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** +- `registration`: `Registration` + - #### Required 
Permissions: - Action: **READ_ANY** - Resource: **REGISTRATION** +- `server`: `Server` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `servers`: `[Server!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `services`: `[Service!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVICES** +- `settings`: `Settings!` +- `shares`: `[Share!]!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SHARE** +- `upsConfiguration`: `UPSConfiguration!` +- `upsDeviceById`: `UPSDevice` + - Arguments: + - `id`: `String!` +- `upsDevices`: `[UPSDevice!]!` +- `validateOidcSession`: `OidcSessionValidation!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Validate an OIDC session token (internal use for CLI validation) + - Arguments: + - `token`: `String!` +- `vars`: `Vars!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **VARS** +- `vms`: `Vms!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **VMS** #### Description: Get information about all VMs on the system + +### `RCloneBackupConfigForm` (OBJECT) +- Fields (3): +- `dataSchema`: `JSON!` +- `id`: `ID!` +- `uiSchema`: `JSON!` + +### `RCloneBackupSettings` (OBJECT) +- Fields (3): +- `configForm`: `RCloneBackupConfigForm!` + - Arguments: + - `formOptions`: `RCloneConfigFormInput` +- `drives`: `[RCloneDrive!]!` +- `remotes`: `[RCloneRemote!]!` + +### `RCloneConfigFormInput` (INPUT_OBJECT) +- Input fields (3): +- `parameters`: `JSON` +- `providerType`: `String` +- `showAdvanced`: `Boolean` + - Default: `false` + +### `RCloneDrive` (OBJECT) +- Fields (2): +- `name`: `String!` + - Provider name +- `options`: `JSON!` + - Provider options and configuration schema + +### `RCloneMutations` (OBJECT) +RClone related mutations + +- Fields (2): +- `createRCloneRemote`: `RCloneRemote!` + - #### Required Permissions: - Action: **CREATE_ANY** - Resource: **FLASH** 
#### Description: Create a new RClone remote + - Arguments: + - `input`: `CreateRCloneRemoteInput!` +- `deleteRCloneRemote`: `Boolean!` + - #### Required Permissions: - Action: **DELETE_ANY** - Resource: **FLASH** #### Description: Delete an existing RClone remote + - Arguments: + - `input`: `DeleteRCloneRemoteInput!` + +### `RCloneRemote` (OBJECT) +- Fields (4): +- `config`: `JSON!` + - Complete remote configuration +- `name`: `String!` +- `parameters`: `JSON!` +- `type`: `String!` + +### `Registration` (OBJECT) +- Implements: `Node` +- Fields (6): +- `expiration`: `String` +- `id`: `PrefixedID!` +- `keyFile`: `KeyFile` +- `state`: `RegistrationState` +- `type`: `registrationType` +- `updateExpiration`: `String` + +### `RegistrationState` (ENUM) +- Enum values (26): + - `BASIC` + - `EBLACKLISTED` + - `EBLACKLISTED1` + - `EBLACKLISTED2` + - `EEXPIRED` + - `EGUID` + - `EGUID1` + - `ENOCONN` + - `ENOFLASH` + - `ENOFLASH1` + - `ENOFLASH2` + - `ENOFLASH3` + - `ENOFLASH4` + - `ENOFLASH5` + - `ENOFLASH6` + - `ENOFLASH7` + - `ENOKEYFILE` + - `ENOKEYFILE1` + - `ENOKEYFILE2` + - `ETRIAL` + - `LIFETIME` + - `PLUS` + - `PRO` + - `STARTER` + - `TRIAL` + - `UNLEASHED` + +### `RemoveRoleFromApiKeyInput` (INPUT_OBJECT) +- Input fields (2): +- `apiKeyId`: `PrefixedID!` +- `role`: `Role!` + +### `ResolvedOrganizerEntry` (UNION) +- Possible types (3): `OrganizerContainerResource`, `OrganizerResource`, `ResolvedOrganizerFolder` + +### `ResolvedOrganizerFolder` (OBJECT) +- Fields (4): +- `children`: `[ResolvedOrganizerEntry!]!` +- `id`: `String!` +- `name`: `String!` +- `type`: `String!` + +### `ResolvedOrganizerV1` (OBJECT) +- Fields (2): +- `version`: `Float!` +- `views`: `[ResolvedOrganizerView!]!` + +### `ResolvedOrganizerView` (OBJECT) +- Fields (4): +- `id`: `String!` +- `name`: `String!` +- `prefs`: `JSON` +- `root`: `ResolvedOrganizerEntry!` + +### `Resource` (ENUM) +Available resources for permissions + +- Enum values (29): + - `ACTIVATION_CODE` + - `API_KEY` + - `ARRAY` + - 
`CLOUD` + - `CONFIG` + - `CONNECT` + - `CONNECT__REMOTE_ACCESS` + - `CUSTOMIZATIONS` + - `DASHBOARD` + - `DISK` + - `DISPLAY` + - `DOCKER` + - `FLASH` + - `INFO` + - `LOGS` + - `ME` + - `NETWORK` + - `NOTIFICATIONS` + - `ONLINE` + - `OS` + - `OWNER` + - `PERMISSION` + - `REGISTRATION` + - `SERVERS` + - `SERVICES` + - `SHARE` + - `VARS` + - `VMS` + - `WELCOME` + +### `Role` (ENUM) +Available roles for API keys and users + +- Enum values (4): + - `ADMIN` + - Full administrative access to all resources + - `CONNECT` + - Internal Role for Unraid Connect + - `GUEST` + - Basic read access to user profile only + - `VIEWER` + - Read-only access to all resources + +### `Server` (OBJECT) +- Implements: `Node` +- Fields (10): +- `apikey`: `String!` +- `guid`: `String!` +- `id`: `PrefixedID!` +- `lanip`: `String!` +- `localurl`: `String!` +- `name`: `String!` +- `owner`: `ProfileModel!` +- `remoteurl`: `String!` +- `status`: `ServerStatus!` + - Whether this server is online or offline +- `wanip`: `String!` + +### `ServerStatus` (ENUM) +- Enum values (3): + - `NEVER_CONNECTED` + - `OFFLINE` + - `ONLINE` + +### `Service` (OBJECT) +- Implements: `Node` +- Fields (5): +- `id`: `PrefixedID!` +- `name`: `String` +- `online`: `Boolean` +- `uptime`: `Uptime` +- `version`: `String` + +### `Settings` (OBJECT) +- Implements: `Node` +- Fields (4): +- `api`: `ApiConfig!` + - The API setting values +- `id`: `PrefixedID!` +- `sso`: `SsoSettings!` + - SSO settings +- `unified`: `UnifiedSettings!` + - A view of all settings + +### `Share` (OBJECT) +- Implements: `Node` +- Fields (16): +- `allocator`: `String` + - Allocator +- `cache`: `Boolean` + - Is this share cached +- `color`: `String` + - Color +- `comment`: `String` + - User comment +- `cow`: `String` + - COW +- `exclude`: `[String!]` + - Disks that are excluded from this share +- `floor`: `String` + - Floor +- `free`: `BigInt` + - (KB) Free space +- `id`: `PrefixedID!` +- `include`: `[String!]` + - Disks that are included in this share 
+- `luksStatus`: `String` + - LUKS status +- `name`: `String` + - Display name +- `nameOrig`: `String` + - Original name +- `size`: `BigInt` + - (KB) Total size +- `splitLevel`: `String` + - Split level +- `used`: `BigInt` + - (KB) Used Size + +### `SsoSettings` (OBJECT) +- Implements: `Node` +- Fields (2): +- `id`: `PrefixedID!` +- `oidcProviders`: `[OidcProvider!]!` + - List of configured OIDC providers + +### `String` (SCALAR) +The `String` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text. + +- Scalar type + +### `Subscription` (OBJECT) +- Fields (11): +- `arraySubscription`: `UnraidArray!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `logFile`: `LogFileContent!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** + - Arguments: + - `path`: `String!` +- `notificationAdded`: `Notification!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** +- `notificationsOverview`: `NotificationOverview!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** +- `ownerSubscription`: `Owner!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** +- `parityHistorySubscription`: `ParityCheck!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** +- `serversSubscription`: `Server!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** +- `systemMetricsCpu`: `CpuUtilization!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `systemMetricsCpuTelemetry`: `CpuPackages!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `systemMetricsMemory`: `MemoryUtilization!` + - #### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** +- `upsUpdates`: `UPSDevice!` + +### `Temperature` (ENUM) +Temperature unit + +- Enum values 
(2): + - `CELSIUS` + - `FAHRENHEIT` + +### `Theme` (OBJECT) +- Fields (7): +- `headerBackgroundColor`: `String` + - The background color of the header +- `headerPrimaryTextColor`: `String` + - The text color of the header +- `headerSecondaryTextColor`: `String` + - The secondary text color of the header +- `name`: `ThemeName!` + - The theme name +- `showBannerGradient`: `Boolean!` + - Whether to show the banner gradient +- `showBannerImage`: `Boolean!` + - Whether to show the header banner image +- `showHeaderDescription`: `Boolean!` + - Whether to show the description in the header + +### `ThemeName` (ENUM) +The theme name + +- Enum values (4): + - `azure` + - `black` + - `gray` + - `white` + +### `UPSBattery` (OBJECT) +- Fields (3): +- `chargeLevel`: `Int!` + - Battery charge level as a percentage (0-100). Unit: percent (%). Example: 100 means battery is fully charged +- `estimatedRuntime`: `Int!` + - Estimated runtime remaining on battery power. Unit: seconds. Example: 3600 means 1 hour of runtime remaining +- `health`: `String!` + - Battery health status. Possible values: 'Good', 'Replace', 'Unknown'. Indicates if the battery needs replacement + +### `UPSCableType` (ENUM) +UPS cable connection types + +- Enum values (5): + - `CUSTOM` + - `ETHER` + - `SIMPLE` + - `SMART` + - `USB` + +### `UPSConfigInput` (INPUT_OBJECT) +- Input fields (10): +- `batteryLevel`: `Int` + - Battery level percentage to initiate shutdown. Unit: percent (%) - Valid range: 0-100 +- `customUpsCable`: `String` + - Custom cable configuration (only used when upsCable is CUSTOM). Format depends on specific UPS model +- `device`: `String` + - Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network +- `killUps`: `UPSKillPower` + - Turn off UPS power after system shutdown. Useful for ensuring complete power cycle +- `minutes`: `Int` + - Runtime left in minutes to initiate shutdown. 
Unit: minutes +- `overrideUpsCapacity`: `Int` + - Override UPS capacity for runtime calculations. Unit: watts (W). Leave unset to use UPS-reported capacity +- `service`: `UPSServiceState` + - Enable or disable the UPS monitoring service +- `timeout`: `Int` + - Time on battery before shutdown. Unit: seconds. Set to 0 to disable timeout-based shutdown +- `upsCable`: `UPSCableType` + - Type of cable connecting the UPS to the server +- `upsType`: `UPSType` + - UPS communication protocol + +### `UPSConfiguration` (OBJECT) +- Fields (14): +- `batteryLevel`: `Int` + - Battery level threshold for shutdown. Unit: percent (%). Example: 10 means shutdown when battery reaches 10%. System will shutdown when battery drops to this level +- `customUpsCable`: `String` + - Custom cable configuration string. Only used when upsCable is set to 'custom'. Format depends on specific UPS model +- `device`: `String` + - Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network. Depends on upsType setting +- `killUps`: `String` + - Kill UPS power after shutdown. Values: 'yes' or 'no'. If 'yes', tells UPS to cut power after system shutdown. Useful for ensuring complete power cycle +- `minutes`: `Int` + - Runtime threshold for shutdown. Unit: minutes. Example: 5 means shutdown when 5 minutes runtime remaining. System will shutdown when estimated runtime drops below this +- `modelName`: `String` + - Override UPS model name. Used for display purposes. Leave unset to use UPS-reported model +- `netServer`: `String` + - Network server mode. Values: 'on' or 'off'. Enable to allow network clients to monitor this UPS +- `nisIp`: `String` + - Network Information Server (NIS) IP address. Default: '0.0.0.0' (listen on all interfaces). IP address for apcupsd network information server +- `overrideUpsCapacity`: `Int` + - Override UPS capacity for runtime calculations. Unit: volt-amperes (VA). Example: 1500 for a 1500VA UPS. 
Leave unset to use UPS-reported capacity +- `service`: `String` + - UPS service state. Values: 'enable' or 'disable'. Controls whether the UPS monitoring service is running +- `timeout`: `Int` + - Timeout for UPS communications. Unit: seconds. Example: 0 means no timeout. Time to wait for UPS response before considering it offline +- `upsCable`: `String` + - Type of cable connecting the UPS to the server. Common values: 'usb', 'smart', 'ether', 'custom'. Determines communication protocol +- `upsName`: `String` + - UPS name for network monitoring. Used to identify this UPS on the network. Example: 'SERVER_UPS' +- `upsType`: `String` + - UPS communication type. Common values: 'usb', 'net', 'snmp', 'dumb', 'pcnet', 'modbus'. Defines how the server communicates with the UPS + +### `UPSDevice` (OBJECT) +- Fields (6): +- `battery`: `UPSBattery!` + - Battery-related information +- `id`: `ID!` + - Unique identifier for the UPS device. Usually based on the model name or a generated ID +- `model`: `String!` + - UPS model name/number. Example: 'APC Back-UPS Pro 1500' +- `name`: `String!` + - Display name for the UPS device. Can be customized by the user +- `power`: `UPSPower!` + - Power-related information +- `status`: `String!` + - Current operational status of the UPS. Common values: 'Online', 'On Battery', 'Low Battery', 'Replace Battery', 'Overload', 'Offline'. 'Online' means running on mains power, 'On Battery' means running on battery backup + +### `UPSKillPower` (ENUM) +Kill UPS power after shutdown option + +- Enum values (2): + - `NO` + - `YES` + +### `UPSPower` (OBJECT) +- Fields (3): +- `inputVoltage`: `Float!` + - Input voltage from the wall outlet/mains power. Unit: volts (V). Example: 120.5 for typical US household voltage +- `loadPercentage`: `Int!` + - Current load on the UPS as a percentage of its capacity. Unit: percent (%). 
Example: 25 means UPS is loaded at 25% of its maximum capacity +- `outputVoltage`: `Float!` + - Output voltage being delivered to connected devices. Unit: volts (V). Example: 120.5 - should match input voltage when on mains power + +### `UPSServiceState` (ENUM) +Service state for UPS daemon + +- Enum values (2): + - `DISABLE` + - `ENABLE` + +### `UPSType` (ENUM) +UPS communication protocols + +- Enum values (7): + - `APCSMART` + - `DUMB` + - `MODBUS` + - `NET` + - `PCNET` + - `SNMP` + - `USB` + +### `UnifiedSettings` (OBJECT) +- Implements: `FormSchema`, `Node` +- Fields (4): +- `dataSchema`: `JSON!` + - The data schema for the settings +- `id`: `PrefixedID!` +- `uiSchema`: `JSON!` + - The UI schema for the settings +- `values`: `JSON!` + - The current values of the settings + +### `UnraidArray` (OBJECT) +- Implements: `Node` +- Fields (8): +- `boot`: `ArrayDisk` + - Current boot disk +- `caches`: `[ArrayDisk!]!` + - Caches in the current array +- `capacity`: `ArrayCapacity!` + - Current array capacity +- `disks`: `[ArrayDisk!]!` + - Data disks in the current array +- `id`: `PrefixedID!` +- `parities`: `[ArrayDisk!]!` + - Parity disks in the current array +- `parityCheckStatus`: `ParityCheck!` + - Current parity check status +- `state`: `ArrayState!` + - Current array state + +### `UpdateApiKeyInput` (INPUT_OBJECT) +- Input fields (5): +- `description`: `String` +- `id`: `PrefixedID!` +- `name`: `String` +- `permissions`: `[AddPermissionInput!]` +- `roles`: `[Role!]` + +### `UpdateSettingsResponse` (OBJECT) +- Fields (3): +- `restartRequired`: `Boolean!` + - Whether a restart is required for the changes to take effect +- `values`: `JSON!` + - The updated settings values +- `warnings`: `[String!]` + - Warning messages about configuration issues found during validation + +### `UpdateStatus` (ENUM) +Update status of a container. 
+ +- Enum values (4): + - `REBUILD_READY` + - `UNKNOWN` + - `UPDATE_AVAILABLE` + - `UP_TO_DATE` + +### `Uptime` (OBJECT) +- Fields (1): +- `timestamp`: `String` + +### `UserAccount` (OBJECT) +- Implements: `Node` +- Fields (5): +- `description`: `String!` + - A description of the user +- `id`: `PrefixedID!` +- `name`: `String!` + - The name of the user +- `permissions`: `[Permission!]` + - The permissions of the user +- `roles`: `[Role!]!` + - The roles of the user + +### `Vars` (OBJECT) +- Implements: `Node` +- Fields (143): +- `bindMgt`: `Boolean` +- `cacheNumDevices`: `Int` +- `cacheSbNumDisks`: `Int` +- `comment`: `String` +- `configError`: `ConfigErrorState` +- `configValid`: `Boolean` +- `csrfToken`: `String` +- `defaultFormat`: `String` +- `defaultFsType`: `String` +- `deviceCount`: `Int` +- `domain`: `String` +- `domainLogin`: `String` +- `domainShort`: `String` +- `enableFruit`: `String` +- `flashGuid`: `String` +- `flashProduct`: `String` +- `flashVendor`: `String` +- `fsCopyPrcnt`: `Int` + - Percentage from 0 - 100 while upgrading a disk or swapping parity drives +- `fsNumMounted`: `Int` +- `fsNumUnmountable`: `Int` +- `fsProgress`: `String` + - Human friendly string of array events happening +- `fsState`: `String` +- `fsUnmountableMask`: `String` +- `fuseDirectio`: `String` +- `fuseDirectioDefault`: `String` +- `fuseDirectioStatus`: `String` +- `fuseRemember`: `String` +- `fuseRememberDefault`: `String` +- `fuseRememberStatus`: `String` +- `hideDotFiles`: `Boolean` +- `id`: `PrefixedID!` +- `joinStatus`: `String` +- `localMaster`: `Boolean` +- `localTld`: `String` +- `luksKeyfile`: `String` +- `maxArraysz`: `Int` +- `maxCachesz`: `Int` +- `mdColor`: `String` +- `mdNumDisabled`: `Int` +- `mdNumDisks`: `Int` +- `mdNumErased`: `Int` +- `mdNumInvalid`: `Int` +- `mdNumMissing`: `Int` +- `mdNumNew`: `Int` +- `mdNumStripes`: `Int` +- `mdNumStripesDefault`: `Int` +- `mdNumStripesStatus`: `String` +- `mdResync`: `Int` +- `mdResyncAction`: `String` +- 
`mdResyncCorr`: `String` +- `mdResyncDb`: `String` +- `mdResyncDt`: `String` +- `mdResyncPos`: `String` +- `mdResyncSize`: `Int` +- `mdState`: `String` +- `mdSyncThresh`: `Int` +- `mdSyncThreshDefault`: `Int` +- `mdSyncThreshStatus`: `String` +- `mdSyncWindow`: `Int` +- `mdSyncWindowDefault`: `Int` +- `mdSyncWindowStatus`: `String` +- `mdVersion`: `String` +- `mdWriteMethod`: `Int` +- `mdWriteMethodDefault`: `String` +- `mdWriteMethodStatus`: `String` +- `name`: `String` + - Machine hostname +- `nrRequests`: `Int` +- `nrRequestsDefault`: `Int` +- `nrRequestsStatus`: `String` +- `ntpServer1`: `String` + - NTP Server 1 +- `ntpServer2`: `String` + - NTP Server 2 +- `ntpServer3`: `String` + - NTP Server 3 +- `ntpServer4`: `String` + - NTP Server 4 +- `pollAttributes`: `String` +- `pollAttributesDefault`: `String` +- `pollAttributesStatus`: `String` +- `port`: `Int` + - Port for the webui via HTTP +- `portssh`: `Int` +- `portssl`: `Int` + - Port for the webui via HTTPS +- `porttelnet`: `Int` +- `queueDepth`: `String` +- `regCheck`: `String` +- `regFile`: `String` +- `regGen`: `String` +- `regGuid`: `String` +- `regState`: `RegistrationState` +- `regTm`: `String` +- `regTm2`: `String` +- `regTo`: `String` + - Registration owner +- `regTy`: `registrationType` +- `safeMode`: `Boolean` +- `sbClean`: `Boolean` +- `sbEvents`: `Int` +- `sbName`: `String` +- `sbNumDisks`: `Int` +- `sbState`: `String` +- `sbSyncErrs`: `Int` +- `sbSyncExit`: `String` +- `sbSynced`: `Int` +- `sbSynced2`: `Int` +- `sbUpdated`: `String` +- `sbVersion`: `String` +- `security`: `String` +- `shareAfpCount`: `Int` + - Total amount shares with AFP enabled +- `shareAfpEnabled`: `Boolean` +- `shareAvahiAfpModel`: `String` +- `shareAvahiAfpName`: `String` +- `shareAvahiEnabled`: `Boolean` +- `shareAvahiSmbModel`: `String` +- `shareAvahiSmbName`: `String` +- `shareCacheEnabled`: `Boolean` +- `shareCacheFloor`: `String` +- `shareCount`: `Int` + - Total amount of user shares +- `shareDisk`: `String` +- 
`shareInitialGroup`: `String` +- `shareInitialOwner`: `String` +- `shareMoverActive`: `Boolean` +- `shareMoverLogging`: `Boolean` +- `shareMoverSchedule`: `String` +- `shareNfsCount`: `Int` + - Total amount shares with NFS enabled +- `shareNfsEnabled`: `Boolean` +- `shareSmbCount`: `Int` + - Total amount shares with SMB enabled +- `shareSmbEnabled`: `Boolean` +- `shareUser`: `String` +- `shareUserExclude`: `String` +- `shareUserInclude`: `String` +- `shutdownTimeout`: `Int` +- `spindownDelay`: `String` +- `spinupGroups`: `Boolean` +- `startArray`: `Boolean` +- `startMode`: `String` +- `startPage`: `String` +- `sysArraySlots`: `Int` +- `sysCacheSlots`: `Int` +- `sysFlashSlots`: `Int` +- `sysModel`: `String` +- `timeZone`: `String` +- `useNtp`: `Boolean` + - Should a NTP server be used for time sync? +- `useSsh`: `Boolean` +- `useSsl`: `Boolean` +- `useTelnet`: `Boolean` + - Should telnet be enabled? +- `version`: `String` + - Unraid version +- `workgroup`: `String` + +### `VmDomain` (OBJECT) +- Implements: `Node` +- Fields (4): +- `id`: `PrefixedID!` + - The unique identifier for the vm (uuid) +- `name`: `String` + - A friendly name for the vm +- `state`: `VmState!` + - Current domain vm state +- `uuid`: `String` + - The UUID of the vm + - Deprecated: Use id instead + +### `VmMutations` (OBJECT) +- Fields (7): +- `forceStop`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Force stop a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `pause`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Pause a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `reboot`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Reboot a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `reset`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: 
Reset a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `resume`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Resume a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `start`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Start a virtual machine + - Arguments: + - `id`: `PrefixedID!` +- `stop`: `Boolean!` + - #### Required Permissions: - Action: **UPDATE_ANY** - Resource: **VMS** #### Description: Stop a virtual machine + - Arguments: + - `id`: `PrefixedID!` + +### `VmState` (ENUM) +The state of a virtual machine + +- Enum values (8): + - `CRASHED` + - `IDLE` + - `NOSTATE` + - `PAUSED` + - `PMSUSPENDED` + - `RUNNING` + - `SHUTDOWN` + - `SHUTOFF` + +### `Vms` (OBJECT) +- Implements: `Node` +- Fields (3): +- `domain`: `[VmDomain!]` +- `domains`: `[VmDomain!]` +- `id`: `PrefixedID!` + +### `registrationType` (ENUM) +- Enum values (8): + - `BASIC` + - `INVALID` + - `LIFETIME` + - `PLUS` + - `PRO` + - `STARTER` + - `TRIAL` + - `UNLEASHED` diff --git a/docs/UNRAID_API_OPERATIONS.md b/docs/UNRAID_API_OPERATIONS.md new file mode 100644 index 0000000..7b0bc9b --- /dev/null +++ b/docs/UNRAID_API_OPERATIONS.md @@ -0,0 +1,290 @@ +# Unraid GraphQL API Operations + +Generated via live introspection at `2026-02-15 23:45:50Z`. 
+ +## Schema Summary +- Query root: `Query` +- Mutation root: `Mutation` +- Subscription root: `Subscription` +- Total types: **164** +- Total directives: **6** +- Type kinds: +- `ENUM`: 32 +- `INPUT_OBJECT`: 16 +- `INTERFACE`: 2 +- `OBJECT`: 103 +- `SCALAR`: 10 +- `UNION`: 1 + +## Queries +Total: **46** + +### `apiKey(id: PrefixedID!): ApiKey` +#### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** + +Arguments: +- `id`: `PrefixedID!` + +### `apiKeyPossiblePermissions(): [Permission!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: All possible permissions for API keys + +### `apiKeyPossibleRoles(): [Role!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: All possible roles for API keys + +### `apiKeys(): [ApiKey!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** + +### `array(): UnraidArray!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** + +### `config(): Config!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** + +### `customization(): Customization` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CUSTOMIZATIONS** + +### `disk(id: PrefixedID!): Disk!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** + +Arguments: +- `id`: `PrefixedID!` + +### `disks(): [Disk!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **DISK** + +### `docker(): Docker!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **DOCKER** + +### `flash(): Flash!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** + +### `getApiKeyCreationFormSchema(): ApiKeyFormSettings!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **API_KEY** #### Description: Get JSON Schema for API key creation form + +### `getAvailableAuthActions(): [AuthAction!]!` +Get all available authentication actions with possession + 
+### `getPermissionsForRoles(roles: [Role!]!): [Permission!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Get the actual permissions that would be granted by a set of roles + +Arguments: +- `roles`: `[Role!]!` + +### `info(): Info!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** + +### `isInitialSetup(): Boolean!` +### `isSSOEnabled(): Boolean!` +### `logFile(lines: Int, path: String!, startLine: Int): LogFileContent!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** + +Arguments: +- `lines`: `Int` +- `path`: `String!` +- `startLine`: `Int` + +### `logFiles(): [LogFile!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** + +### `me(): UserAccount!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **ME** + +### `metrics(): Metrics!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** + +### `notifications(): Notifications!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** #### Description: Get all notifications + +### `oidcConfiguration(): OidcConfiguration!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get the full OIDC configuration (admin only) + +### `oidcProvider(id: PrefixedID!): OidcProvider` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get a specific OIDC provider by ID + +Arguments: +- `id`: `PrefixedID!` + +### `oidcProviders(): [OidcProvider!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Get all configured OIDC providers (admin only) + +### `online(): Boolean!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **ONLINE** + +### `owner(): Owner!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** + +### `parityHistory(): [ParityCheck!]!` +#### Required Permissions: - Action: **READ_ANY** - 
Resource: **ARRAY** + +### `plugins(): [Plugin!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: List all installed plugins with their metadata + +### `previewEffectivePermissions(permissions: [AddPermissionInput!], roles: [Role!]): [Permission!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **PERMISSION** #### Description: Preview the effective permissions for a combination of roles and explicit permissions + +Arguments: +- `permissions`: `[AddPermissionInput!]` +- `roles`: `[Role!]` + +### `publicOidcProviders(): [PublicOidcProvider!]!` +Get public OIDC provider information for login buttons + +### `publicPartnerInfo(): PublicPartnerInfo` +### `publicTheme(): Theme!` +### `rclone(): RCloneBackupSettings!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **FLASH** + +### `registration(): Registration` +#### Required Permissions: - Action: **READ_ANY** - Resource: **REGISTRATION** + +### `server(): Server` +#### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** + +### `servers(): [Server!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** + +### `services(): [Service!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **SERVICES** + +### `settings(): Settings!` +### `shares(): [Share!]!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **SHARE** + +### `upsConfiguration(): UPSConfiguration!` +### `upsDeviceById(id: String!): UPSDevice` +Arguments: +- `id`: `String!` + +### `upsDevices(): [UPSDevice!]!` +### `validateOidcSession(token: String!): OidcSessionValidation!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **CONFIG** #### Description: Validate an OIDC session token (internal use for CLI validation) + +Arguments: +- `token`: `String!` + +### `vars(): Vars!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **VARS** + +### `vms(): Vms!` +#### Required Permissions: - Action: 
**READ_ANY** - Resource: **VMS** #### Description: Get information about all VMs on the system + +## Mutations +Total: **22** + +### `addPlugin(input: PluginManagementInput!): Boolean!` +#### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** #### Description: Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. + +Arguments: +- `input`: `PluginManagementInput!` + +### `apiKey(): ApiKeyMutations!` +### `archiveAll(importance: NotificationImportance): NotificationOverview!` +Arguments: +- `importance`: `NotificationImportance` + +### `archiveNotification(id: PrefixedID!): Notification!` +Marks a notification as archived. + +Arguments: +- `id`: `PrefixedID!` + +### `archiveNotifications(ids: [PrefixedID!]!): NotificationOverview!` +Arguments: +- `ids`: `[PrefixedID!]!` + +### `array(): ArrayMutations!` +### `configureUps(config: UPSConfigInput!): Boolean!` +Arguments: +- `config`: `UPSConfigInput!` + +### `createNotification(input: NotificationData!): Notification!` +Creates a new notification record + +Arguments: +- `input`: `NotificationData!` + +### `customization(): CustomizationMutations!` +### `deleteArchivedNotifications(): NotificationOverview!` +Deletes all archived notifications on server. + +### `deleteNotification(id: PrefixedID!, type: NotificationType!): NotificationOverview!` +Arguments: +- `id`: `PrefixedID!` +- `type`: `NotificationType!` + +### `docker(): DockerMutations!` +### `initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus!` +Initiates a flash drive backup using a configured remote. + +Arguments: +- `input`: `InitiateFlashBackupInput!` + +### `parityCheck(): ParityCheckMutations!` +### `rclone(): RCloneMutations!` +### `recalculateOverview(): NotificationOverview!` +Reads each notification to recompute & update the overview. 
+ +### `removePlugin(input: PluginManagementInput!): Boolean!` +#### Required Permissions: - Action: **DELETE_ANY** - Resource: **CONFIG** #### Description: Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. + +Arguments: +- `input`: `PluginManagementInput!` + +### `unarchiveAll(importance: NotificationImportance): NotificationOverview!` +Arguments: +- `importance`: `NotificationImportance` + +### `unarchiveNotifications(ids: [PrefixedID!]!): NotificationOverview!` +Arguments: +- `ids`: `[PrefixedID!]!` + +### `unreadNotification(id: PrefixedID!): Notification!` +Marks a notification as unread. + +Arguments: +- `id`: `PrefixedID!` + +### `updateSettings(input: JSON!): UpdateSettingsResponse!` +#### Required Permissions: - Action: **UPDATE_ANY** - Resource: **CONFIG** + +Arguments: +- `input`: `JSON!` + +### `vm(): VmMutations!` +## Subscriptions +Total: **11** + +### `arraySubscription(): UnraidArray!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** + +### `logFile(path: String!): LogFileContent!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **LOGS** + +Arguments: +- `path`: `String!` + +### `notificationAdded(): Notification!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** + +### `notificationsOverview(): NotificationOverview!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **NOTIFICATIONS** + +### `ownerSubscription(): Owner!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **OWNER** + +### `parityHistorySubscription(): ParityCheck!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **ARRAY** + +### `serversSubscription(): Server!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **SERVERS** + +### `systemMetricsCpu(): CpuUtilization!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** + +### `systemMetricsCpuTelemetry(): 
CpuPackages!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** + +### `systemMetricsMemory(): MemoryUtilization!` +#### Required Permissions: - Action: **READ_ANY** - Resource: **INFO** + +### `upsUpdates(): UPSDevice!` diff --git a/pyproject.toml b/pyproject.toml index 807b4a0..6a95311 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -284,9 +284,11 @@ dev = [ "pytest>=8.4.2", "pytest-asyncio>=1.2.0", "pytest-cov>=7.0.0", + "respx>=0.22.0", "types-pytz>=2025.2.0.20250809", "ty>=0.0.15", "ruff>=0.12.8", "build>=1.2.2", "twine>=6.0.1", + "graphql-core>=3.2.0", ] diff --git a/scripts/generate_unraid_api_reference.py b/scripts/generate_unraid_api_reference.py new file mode 100644 index 0000000..e5e301b --- /dev/null +++ b/scripts/generate_unraid_api_reference.py @@ -0,0 +1,447 @@ +#!/usr/bin/env python3 +"""Generate a complete Markdown reference from Unraid GraphQL introspection.""" + +from __future__ import annotations + +import argparse +import json +import os +from collections import Counter, defaultdict +from pathlib import Path +from typing import Any + +import httpx + + +DEFAULT_OUTPUT = Path("docs/UNRAID_API_COMPLETE_REFERENCE.md") + +INTROSPECTION_QUERY = """ +query FullIntrospection { + __schema { + queryType { name } + mutationType { name } + subscriptionType { name } + directives { + name + description + locations + args { + name + description + defaultValue + type { ...TypeRef } + } + } + types { + kind + name + description + fields(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + args { + name + description + defaultValue + type { ...TypeRef } + } + type { ...TypeRef } + } + inputFields { + name + description + defaultValue + type { ...TypeRef } + } + interfaces { kind name } + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + possibleTypes { kind name } + } + } +} + +fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { 
+ kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } +} +""" + + +def _clean(text: str | None) -> str: + """Collapse multiline description text into a single line.""" + if not text: + return "" + return " ".join(text.split()) + + +def _type_to_str(type_ref: dict[str, Any] | None) -> str: + """Render GraphQL nested type refs to SDL-like notation.""" + if not type_ref: + return "Unknown" + kind = type_ref.get("kind") + if kind == "NON_NULL": + return f"{_type_to_str(type_ref.get('ofType'))}!" + if kind == "LIST": + return f"[{_type_to_str(type_ref.get('ofType'))}]" + return str(type_ref.get("name") or kind or "Unknown") + + +def _field_lines(field: dict[str, Any], *, is_input: bool) -> list[str]: + """Render field/input-field markdown lines.""" + lines: list[str] = [] + lines.append(f"- `{field['name']}`: `{_type_to_str(field.get('type'))}`") + + description = _clean(field.get("description")) + if description: + lines.append(f" - {description}") + + default_value = field.get("defaultValue") + if default_value is not None: + lines.append(f" - Default: `{default_value}`") + + if not is_input: + args = sorted(field.get("args") or [], key=lambda item: str(item["name"])) + if args: + lines.append(" - Arguments:") + for arg in args: + arg_line = f" - `{arg['name']}`: `{_type_to_str(arg.get('type'))}`" + if arg.get("defaultValue") is not None: + arg_line += f" (default: `{arg['defaultValue']}`)" + lines.append(arg_line) + + arg_description = _clean(arg.get("description")) + if arg_description: + lines.append(f" - {arg_description}") + + if field.get("isDeprecated"): + reason = _clean(field.get("deprecationReason")) + lines.append(f" - Deprecated: {reason}" if reason else " - Deprecated") + + return lines + + +def _build_markdown(schema: dict[str, Any], *, include_introspection: bool) -> str: + """Build full Markdown schema reference.""" + all_types = 
schema.get("types") or [] + types = [ + item + for item in all_types + if item.get("name") and (include_introspection or not str(item["name"]).startswith("__")) + ] + types_by_name = {str(item["name"]): item for item in types} + + kind_counts = Counter(str(item.get("kind", "UNKNOWN")) for item in types) + directives = sorted(schema.get("directives") or [], key=lambda item: str(item["name"])) + + implements_map: dict[str, list[str]] = defaultdict(list) + for item in types: + for interface in item.get("interfaces") or []: + interface_name = interface.get("name") + if interface_name: + implements_map[str(interface_name)].append(str(item["name"])) + + query_root = (schema.get("queryType") or {}).get("name") + mutation_root = (schema.get("mutationType") or {}).get("name") + subscription_root = (schema.get("subscriptionType") or {}).get("name") + + lines: list[str] = [] + lines.append("# Unraid GraphQL API Complete Schema Reference") + lines.append("") + lines.append( + "Generated via live GraphQL introspection for the configured endpoint and API key." 
+ ) + lines.append("") + lines.append("This is permission-scoped: it contains everything visible to the API key used.") + lines.append("") + lines.append("## Table of Contents") + lines.append("- [Schema Summary](#schema-summary)") + lines.append("- [Root Operations](#root-operations)") + lines.append("- [Directives](#directives)") + lines.append("- [All Types (Alphabetical)](#all-types-alphabetical)") + lines.append("") + + lines.append("## Schema Summary") + lines.append(f"- Query root: `{query_root}`") + lines.append(f"- Mutation root: `{mutation_root}`") + lines.append(f"- Subscription root: `{subscription_root}`") + lines.append(f"- Total types: **{len(types)}**") + lines.append(f"- Total directives: **{len(directives)}**") + lines.append("- Type kinds:") + lines.extend(f"- `{kind}`: {kind_counts[kind]}" for kind in sorted(kind_counts)) + lines.append("") + + def render_root(root_name: str | None, label: str) -> None: + lines.append(f"### {label}") + if not root_name or root_name not in types_by_name: + lines.append("Not exposed.") + lines.append("") + return + + root_type = types_by_name[root_name] + fields = sorted(root_type.get("fields") or [], key=lambda item: str(item["name"])) + lines.append(f"Total fields: **{len(fields)}**") + lines.append("") + for field in fields: + args = sorted(field.get("args") or [], key=lambda item: str(item["name"])) + arg_signature: list[str] = [] + for arg in args: + part = f"{arg['name']}: {_type_to_str(arg.get('type'))}" + if arg.get("defaultValue") is not None: + part += f" = {arg['defaultValue']}" + arg_signature.append(part) + + signature = ( + f"{field['name']}({', '.join(arg_signature)})" + if arg_signature + else f"{field['name']}()" + ) + lines.append(f"- `{signature}: {_type_to_str(field.get('type'))}`") + + description = _clean(field.get("description")) + if description: + lines.append(f" - {description}") + + if field.get("isDeprecated"): + reason = _clean(field.get("deprecationReason")) + lines.append(f" - 
Deprecated: {reason}" if reason else " - Deprecated") + lines.append("") + + lines.append("## Root Operations") + render_root(query_root, "Queries") + render_root(mutation_root, "Mutations") + render_root(subscription_root, "Subscriptions") + + lines.append("## Directives") + if not directives: + lines.append("No directives exposed.") + lines.append("") + else: + for directive in directives: + lines.append(f"### `@{directive['name']}`") + description = _clean(directive.get("description")) + if description: + lines.append(description) + lines.append("") + locations = directive.get("locations") or [] + lines.append( + f"- Locations: {', '.join(f'`{item}`' for item in locations) if locations else 'None'}" + ) + args = sorted(directive.get("args") or [], key=lambda item: str(item["name"])) + if args: + lines.append("- Arguments:") + for arg in args: + line = f" - `{arg['name']}`: `{_type_to_str(arg.get('type'))}`" + if arg.get("defaultValue") is not None: + line += f" (default: `{arg['defaultValue']}`)" + lines.append(line) + arg_description = _clean(arg.get("description")) + if arg_description: + lines.append(f" - {arg_description}") + lines.append("") + + lines.append("## All Types (Alphabetical)") + for item in sorted(types, key=lambda row: str(row["name"])): + name = str(item["name"]) + kind = str(item["kind"]) + lines.append(f"### `{name}` ({kind})") + + description = _clean(item.get("description")) + if description: + lines.append(description) + lines.append("") + + if kind == "OBJECT": + interfaces = sorted( + str(interface["name"]) + for interface in (item.get("interfaces") or []) + if interface.get("name") + ) + if interfaces: + lines.append(f"- Implements: {', '.join(f'`{value}`' for value in interfaces)}") + + fields = sorted(item.get("fields") or [], key=lambda row: str(row["name"])) + lines.append(f"- Fields ({len(fields)}):") + if fields: + for field in fields: + lines.extend(_field_lines(field, is_input=False)) + else: + lines.append("- None") + + elif 
kind == "INPUT_OBJECT": + fields = sorted(item.get("inputFields") or [], key=lambda row: str(row["name"])) + lines.append(f"- Input fields ({len(fields)}):") + if fields: + for field in fields: + lines.extend(_field_lines(field, is_input=True)) + else: + lines.append("- None") + + elif kind == "ENUM": + enum_values = sorted(item.get("enumValues") or [], key=lambda row: str(row["name"])) + lines.append(f"- Enum values ({len(enum_values)}):") + if enum_values: + for enum_value in enum_values: + lines.append(f" - `{enum_value['name']}`") + enum_description = _clean(enum_value.get("description")) + if enum_description: + lines.append(f" - {enum_description}") + if enum_value.get("isDeprecated"): + reason = _clean(enum_value.get("deprecationReason")) + lines.append( + f" - Deprecated: {reason}" if reason else " - Deprecated" + ) + else: + lines.append("- None") + + elif kind == "INTERFACE": + fields = sorted(item.get("fields") or [], key=lambda row: str(row["name"])) + lines.append(f"- Interface fields ({len(fields)}):") + if fields: + for field in fields: + lines.extend(_field_lines(field, is_input=False)) + else: + lines.append("- None") + + implementers = sorted(implements_map.get(name, [])) + if implementers: + lines.append( + f"- Implemented by ({len(implementers)}): " + + ", ".join(f"`{value}`" for value in implementers) + ) + else: + lines.append("- Implemented by (0): None") + + elif kind == "UNION": + possible_types = sorted( + str(possible["name"]) + for possible in (item.get("possibleTypes") or []) + if possible.get("name") + ) + if possible_types: + lines.append( + f"- Possible types ({len(possible_types)}): " + + ", ".join(f"`{value}`" for value in possible_types) + ) + else: + lines.append("- Possible types (0): None") + + elif kind == "SCALAR": + lines.append("- Scalar type") + + else: + lines.append("- Unhandled type kind") + + lines.append("") + + return "\n".join(lines).rstrip() + "\n" + + +def _parse_args() -> argparse.Namespace: + """Parse CLI 
args.""" + parser = argparse.ArgumentParser( + description="Generate complete Unraid GraphQL schema reference Markdown from introspection." + ) + parser.add_argument( + "--api-url", + default=os.getenv("UNRAID_API_URL", ""), + help="GraphQL endpoint URL (default: UNRAID_API_URL env var).", + ) + parser.add_argument( + "--api-key", + default=os.getenv("UNRAID_API_KEY", ""), + help="API key (default: UNRAID_API_KEY env var).", + ) + parser.add_argument( + "--output", + type=Path, + default=DEFAULT_OUTPUT, + help=f"Output markdown file path (default: {DEFAULT_OUTPUT}).", + ) + parser.add_argument( + "--timeout-seconds", + type=float, + default=90.0, + help="HTTP timeout in seconds (default: 90).", + ) + parser.add_argument( + "--verify-ssl", + action="store_true", + help="Enable SSL cert verification. Default is disabled for local/self-signed setups.", + ) + parser.add_argument( + "--include-introspection-types", + action="store_true", + help="Include __Schema/__Type/etc in the generated type list.", + ) + return parser.parse_args() + + +def main() -> int: + """Run generator CLI.""" + args = _parse_args() + + if not args.api_url: + raise SystemExit("Missing API URL. Provide --api-url or set UNRAID_API_URL.") + if not args.api_key: + raise SystemExit("Missing API key. 
Provide --api-key or set UNRAID_API_KEY.") + + headers = {"Authorization": f"Bearer {args.api_key}", "Content-Type": "application/json"} + + with httpx.Client(timeout=args.timeout_seconds, verify=args.verify_ssl) as client: + response = client.post(args.api_url, json={"query": INTROSPECTION_QUERY}, headers=headers) + + response.raise_for_status() + payload = response.json() + if payload.get("errors"): + errors = json.dumps(payload["errors"], indent=2) + raise SystemExit(f"GraphQL introspection returned errors:\n{errors}") + + schema = (payload.get("data") or {}).get("__schema") + if not schema: + raise SystemExit("GraphQL introspection returned no __schema payload.") + + markdown = _build_markdown(schema, include_introspection=bool(args.include_introspection_types)) + args.output.parent.mkdir(parents=True, exist_ok=True) + args.output.write_text(markdown, encoding="utf-8") + + print(f"Wrote {args.output}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tests/http/__init__.py b/tests/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/http/test_request_construction.py b/tests/http/test_request_construction.py new file mode 100644 index 0000000..90f95da --- /dev/null +++ b/tests/http/test_request_construction.py @@ -0,0 +1,1187 @@ +"""HTTP layer tests that mock at the httpx level using respx. + +These tests verify that tools construct correct GraphQL requests, +pass proper variables, use correct timeouts, and handle HTTP-level +errors appropriately. Unlike the tool-level tests (which mock +make_graphql_request), these tests intercept the actual HTTP call +to verify the full request pipeline. 
+""" + +import json +from typing import Any +from unittest.mock import patch + +import httpx +import pytest +import respx + +from tests.conftest import make_tool_fn +from unraid_mcp.core.client import DEFAULT_TIMEOUT, DISK_TIMEOUT, make_graphql_request +from unraid_mcp.core.exceptions import ToolError + +# --------------------------------------------------------------------------- +# Shared fixtures +# --------------------------------------------------------------------------- + +API_URL = "https://unraid.local/graphql" +API_KEY = "test-api-key-12345" + + +@pytest.fixture(autouse=True) +def _patch_config(): + """Patch API URL and key for all tests in this module.""" + with ( + patch("unraid_mcp.core.client.UNRAID_API_URL", API_URL), + patch("unraid_mcp.core.client.UNRAID_API_KEY", API_KEY), + ): + yield + + +@pytest.fixture(autouse=True) +def _reset_http_client(): + """Reset the global HTTP client between tests so respx can intercept.""" + from unraid_mcp.core import client as client_mod + + original = client_mod._http_client + client_mod._http_client = None + yield + client_mod._http_client = original + + +def _graphql_response(data: dict[str, Any] | None = None, errors: list[dict] | None = None): + """Build a standard GraphQL JSON response.""" + body: dict[str, Any] = {} + if data is not None: + body["data"] = data + if errors is not None: + body["errors"] = errors + return httpx.Response(200, json=body) + + +def _extract_request_body(request: httpx.Request) -> dict[str, Any]: + """Extract and parse the JSON body from a captured request.""" + return json.loads(request.content.decode()) + + +# =========================================================================== +# Section 1: Core client request construction +# =========================================================================== + + +class TestCoreRequestConstruction: + """Verify make_graphql_request builds correct HTTP requests.""" + + @respx.mock + async def test_sends_post_to_api_url(self) -> None: 
+ route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + assert route.called + + @respx.mock + async def test_request_contains_query_in_body(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + body = _extract_request_body(route.calls.last.request) + assert body["query"] == "query { online }" + + @respx.mock + async def test_request_contains_variables_when_provided(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"disk": {}})) + await make_graphql_request("query ($id: String!) { disk(id: $id) }", variables={"id": "d1"}) + body = _extract_request_body(route.calls.last.request) + assert body["variables"] == {"id": "d1"} + + @respx.mock + async def test_request_omits_variables_when_none(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + body = _extract_request_body(route.calls.last.request) + assert "variables" not in body + + @respx.mock + async def test_request_includes_api_key_header(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + req = route.calls.last.request + assert req.headers["X-API-Key"] == API_KEY + + @respx.mock + async def test_request_includes_content_type_header(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + req = route.calls.last.request + assert req.headers["Content-Type"] == "application/json" + + @respx.mock + async def test_request_includes_user_agent_header(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + await make_graphql_request("query { online }") + req = route.calls.last.request + 
assert "UnraidMCPServer/" in req.headers["User-Agent"] + + +# =========================================================================== +# Section 2: Timeout handling +# =========================================================================== + + +class TestTimeoutHandling: + """Verify timeout configuration is passed correctly.""" + + @respx.mock + async def test_default_timeout_values(self) -> None: + assert DEFAULT_TIMEOUT.read == 30.0 + assert DEFAULT_TIMEOUT.connect == 5.0 + + @respx.mock + async def test_disk_timeout_values(self) -> None: + assert DISK_TIMEOUT.read == 90.0 + assert DISK_TIMEOUT.connect == 5.0 + + @respx.mock + async def test_custom_timeout_is_used(self) -> None: + """When custom_timeout is passed, the request uses it.""" + route = respx.post(API_URL).mock(return_value=_graphql_response({"data": {}})) + custom = httpx.Timeout(10.0, read=120.0) + await make_graphql_request("query { info }", custom_timeout=custom) + # The request was made successfully (no timeout error) + assert route.called + + +# =========================================================================== +# Section 3: HTTP error handling +# =========================================================================== + + +class TestHttpErrorHandling: + """Verify HTTP-level errors are properly converted to ToolError.""" + + @respx.mock + async def test_http_401_raises_tool_error(self) -> None: + respx.post(API_URL).mock(return_value=httpx.Response(401, text="Unauthorized")) + with pytest.raises(ToolError, match="HTTP error 401"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_http_403_raises_tool_error(self) -> None: + respx.post(API_URL).mock(return_value=httpx.Response(403, text="Forbidden")) + with pytest.raises(ToolError, match="HTTP error 403"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_http_500_raises_tool_error(self) -> None: + respx.post(API_URL).mock(return_value=httpx.Response(500, 
text="Internal Server Error")) + with pytest.raises(ToolError, match="HTTP error 500"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_http_503_raises_tool_error(self) -> None: + respx.post(API_URL).mock(return_value=httpx.Response(503, text="Service Unavailable")) + with pytest.raises(ToolError, match="HTTP error 503"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_network_connection_error(self) -> None: + respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused")) + with pytest.raises(ToolError, match="Network connection error"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_network_timeout_error(self) -> None: + respx.post(API_URL).mock(side_effect=httpx.ReadTimeout("Read timed out")) + with pytest.raises(ToolError, match="Network connection error"): + await make_graphql_request("query { online }") + + @respx.mock + async def test_invalid_json_response(self) -> None: + respx.post(API_URL).mock(return_value=httpx.Response(200, text="not json")) + with pytest.raises(ToolError, match="Invalid JSON response"): + await make_graphql_request("query { online }") + + +# =========================================================================== +# Section 4: GraphQL error handling at HTTP layer +# =========================================================================== + + +class TestGraphQLErrorHandling: + """Verify GraphQL-level errors in the HTTP response body.""" + + @respx.mock + async def test_graphql_error_raises_tool_error(self) -> None: + respx.post(API_URL).mock( + return_value=_graphql_response(errors=[{"message": "Field 'bogus' not found"}]) + ) + with pytest.raises(ToolError, match="Field 'bogus' not found"): + await make_graphql_request("{ bogus }") + + @respx.mock + async def test_multiple_graphql_errors_joined(self) -> None: + respx.post(API_URL).mock( + return_value=_graphql_response( + errors=[{"message": "Error one"}, 
{"message": "Error two"}] + ) + ) + with pytest.raises(ToolError, match="Error one; Error two"): + await make_graphql_request("{ info }") + + @respx.mock + async def test_idempotent_start_error_returns_success(self) -> None: + respx.post(API_URL).mock( + return_value=_graphql_response( + errors=[{"message": "Container already running"}] + ) + ) + result = await make_graphql_request( + 'mutation { docker { start(id: "x") } }', + operation_context={"operation": "start"}, + ) + assert result["idempotent_success"] is True + assert result["operation"] == "start" + + @respx.mock + async def test_idempotent_stop_error_returns_success(self) -> None: + respx.post(API_URL).mock( + return_value=_graphql_response( + errors=[{"message": "Container not running"}] + ) + ) + result = await make_graphql_request( + 'mutation { docker { stop(id: "x") } }', + operation_context={"operation": "stop"}, + ) + assert result["idempotent_success"] is True + + @respx.mock + async def test_empty_data_returns_empty_dict(self) -> None: + respx.post(API_URL).mock(return_value=_graphql_response(data=None)) + result = await make_graphql_request("query { info }") + assert result == {} + + +# =========================================================================== +# Section 5: Info tool request construction +# =========================================================================== + + +class TestInfoToolRequests: + """Verify unraid_info tool constructs correct GraphQL queries.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + + @respx.mock + async def test_overview_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"info": {"os": {"platform": "linux", "hostname": "tower"}, "cpu": {}, "memory": {}}} + ) + ) + tool = self._get_tool() + await tool(action="overview") + body = _extract_request_body(route.calls.last.request) + assert "GetSystemInfo" in body["query"] + 
assert "info" in body["query"] + + @respx.mock + async def test_array_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"array": {"state": "STARTED", "capacity": {}}}) + ) + tool = self._get_tool() + await tool(action="array") + body = _extract_request_body(route.calls.last.request) + assert "GetArrayStatus" in body["query"] + + @respx.mock + async def test_network_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"network": {"id": "n1", "accessUrls": []}}) + ) + tool = self._get_tool() + await tool(action="network") + body = _extract_request_body(route.calls.last.request) + assert "GetNetworkConfig" in body["query"] + + @respx.mock + async def test_metrics_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"metrics": {"cpu": {"used": 50}, "memory": {"used": 4096, "total": 16384}}}) + ) + tool = self._get_tool() + await tool(action="metrics") + body = _extract_request_body(route.calls.last.request) + assert "GetMetrics" in body["query"] + + @respx.mock + async def test_ups_device_sends_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"upsDeviceById": {"id": "ups1", "model": "APC"}}) + ) + tool = self._get_tool() + await tool(action="ups_device", device_id="ups1") + body = _extract_request_body(route.calls.last.request) + assert body["variables"] == {"id": "ups1"} + assert "GetUpsDevice" in body["query"] + + @respx.mock + async def test_online_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"online": True}) + ) + tool = self._get_tool() + await tool(action="online") + body = _extract_request_body(route.calls.last.request) + assert "GetOnline" in body["query"] + + @respx.mock + async def test_servers_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + 
return_value=_graphql_response({"servers": [{"id": "s1", "name": "tower"}]}) + ) + tool = self._get_tool() + await tool(action="servers") + body = _extract_request_body(route.calls.last.request) + assert "GetServers" in body["query"] + + @respx.mock + async def test_flash_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"flash": {"id": "f1", "guid": "abc"}}) + ) + tool = self._get_tool() + await tool(action="flash") + body = _extract_request_body(route.calls.last.request) + assert "GetFlash" in body["query"] + + +# =========================================================================== +# Section 6: Docker tool request construction +# =========================================================================== + + +class TestDockerToolRequests: + """Verify unraid_docker tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") + + @respx.mock + async def test_list_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"docker": {"containers": [{"id": "c1", "names": ["plex"], "state": "running"}]}} + ) + ) + tool = self._get_tool() + await tool(action="list") + body = _extract_request_body(route.calls.last.request) + assert "ListDockerContainers" in body["query"] + + @respx.mock + async def test_start_sends_mutation_with_id(self) -> None: + container_id = "a" * 64 + # First call: resolve container ID (already matches pattern, so no resolution needed) + # The tool sends the mutation directly since the ID matches _DOCKER_ID_PATTERN + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"docker": {"start": {"id": container_id, "names": ["plex"], "state": "running", "status": "Up"}}} + ) + ) + tool = self._get_tool() + await tool(action="start", container_id=container_id) + body = _extract_request_body(route.calls.last.request) + assert 
"StartContainer" in body["query"] + assert body["variables"] == {"id": container_id} + + @respx.mock + async def test_stop_sends_mutation_with_id(self) -> None: + container_id = "b" * 64 + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"docker": {"stop": {"id": container_id, "names": ["sonarr"], "state": "exited", "status": "Exited"}}} + ) + ) + tool = self._get_tool() + await tool(action="stop", container_id=container_id) + body = _extract_request_body(route.calls.last.request) + assert "StopContainer" in body["query"] + assert body["variables"] == {"id": container_id} + + @respx.mock + async def test_remove_requires_confirm(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="remove", container_id="a" * 64) + + @respx.mock + async def test_remove_sends_mutation_when_confirmed(self) -> None: + container_id = "c" * 64 + route = respx.post(API_URL).mock( + return_value=_graphql_response({"docker": {"removeContainer": True}}) + ) + tool = self._get_tool() + await tool(action="remove", container_id=container_id, confirm=True) + body = _extract_request_body(route.calls.last.request) + assert "RemoveContainer" in body["query"] + + @respx.mock + async def test_logs_sends_query_with_tail(self) -> None: + container_id = "d" * 64 + route = respx.post(API_URL).mock( + return_value=_graphql_response({"docker": {"logs": "line1\nline2"}}) + ) + tool = self._get_tool() + await tool(action="logs", container_id=container_id, tail_lines=50) + body = _extract_request_body(route.calls.last.request) + assert "GetContainerLogs" in body["query"] + assert body["variables"]["tail"] == 50 + + @respx.mock + async def test_networks_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"dockerNetworks": [{"id": "n1", "name": "bridge", "driver": "bridge", "scope": "local"}]} + ) + ) + tool = self._get_tool() + await tool(action="networks") + body = 
_extract_request_body(route.calls.last.request) + assert "GetDockerNetworks" in body["query"] + + @respx.mock + async def test_check_updates_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"docker": {"containerUpdateStatuses": []}} + ) + ) + tool = self._get_tool() + await tool(action="check_updates") + body = _extract_request_body(route.calls.last.request) + assert "CheckContainerUpdates" in body["query"] + + @respx.mock + async def test_restart_sends_stop_then_start(self) -> None: + """Restart is a compound action: stop + start. Verify both are sent.""" + container_id = "e" * 64 + call_count = 0 + + def side_effect(request: httpx.Request) -> httpx.Response: + nonlocal call_count + body = json.loads(request.content.decode()) + call_count += 1 + if "StopContainer" in body["query"]: + return _graphql_response( + {"docker": {"stop": {"id": container_id, "names": ["app"], "state": "exited", "status": "Exited"}}} + ) + if "StartContainer" in body["query"]: + return _graphql_response( + {"docker": {"start": {"id": container_id, "names": ["app"], "state": "running", "status": "Up"}}} + ) + return _graphql_response({"docker": {"containers": []}}) + + respx.post(API_URL).mock(side_effect=side_effect) + tool = self._get_tool() + result = await tool(action="restart", container_id=container_id) + assert result["success"] is True + assert result["action"] == "restart" + assert call_count == 2 + + @respx.mock + async def test_container_name_resolution(self) -> None: + """When a name is provided instead of a PrefixedID, the tool resolves it first.""" + resolved_id = "f" * 64 + call_count = 0 + + def side_effect(request: httpx.Request) -> httpx.Response: + nonlocal call_count + body = json.loads(request.content.decode()) + call_count += 1 + if "ResolveContainerID" in body["query"]: + return _graphql_response( + {"docker": {"containers": [{"id": resolved_id, "names": ["plex"]}]}} + ) + if "StartContainer" in 
body["query"]: + return _graphql_response( + {"docker": {"start": {"id": resolved_id, "names": ["plex"], "state": "running", "status": "Up"}}} + ) + return _graphql_response({}) + + respx.post(API_URL).mock(side_effect=side_effect) + tool = self._get_tool() + result = await tool(action="start", container_id="plex") + assert call_count == 2 # resolve + start + assert result["success"] is True + + +# =========================================================================== +# Section 7: VM tool request construction +# =========================================================================== + + +class TestVMToolRequests: + """Verify unraid_vm tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm") + + @respx.mock + async def test_list_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"vms": {"domains": [{"id": "v1", "name": "win10", "state": "running", "uuid": "u1"}]}} + ) + ) + tool = self._get_tool() + result = await tool(action="list") + body = _extract_request_body(route.calls.last.request) + assert "ListVMs" in body["query"] + assert "vms" in result + + @respx.mock + async def test_start_sends_mutation_with_id(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"vm": {"start": True}}) + ) + tool = self._get_tool() + result = await tool(action="start", vm_id="vm-123") + body = _extract_request_body(route.calls.last.request) + assert "StartVM" in body["query"] + assert body["variables"] == {"id": "vm-123"} + assert result["success"] is True + + @respx.mock + async def test_stop_sends_mutation_with_id(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"vm": {"stop": True}}) + ) + tool = self._get_tool() + result = await tool(action="stop", vm_id="vm-456") + body = _extract_request_body(route.calls.last.request) + assert "StopVM" in 
body["query"] + assert body["variables"] == {"id": "vm-456"} + + @respx.mock + async def test_force_stop_requires_confirm(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="force_stop", vm_id="vm-789") + + @respx.mock + async def test_force_stop_sends_mutation_when_confirmed(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"vm": {"forceStop": True}}) + ) + tool = self._get_tool() + result = await tool(action="force_stop", vm_id="vm-789", confirm=True) + body = _extract_request_body(route.calls.last.request) + assert "ForceStopVM" in body["query"] + assert result["success"] is True + + @respx.mock + async def test_reset_requires_confirm(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="reset", vm_id="vm-abc") + + @respx.mock + async def test_details_finds_vm_by_name(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"vms": {"domains": [ + {"id": "v1", "name": "win10", "state": "running", "uuid": "uuid-1"}, + {"id": "v2", "name": "ubuntu", "state": "stopped", "uuid": "uuid-2"}, + ]}} + ) + ) + tool = self._get_tool() + result = await tool(action="details", vm_id="ubuntu") + assert result["name"] == "ubuntu" + + +# =========================================================================== +# Section 8: Array tool request construction +# =========================================================================== + + +class TestArrayToolRequests: + """Verify unraid_array tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.array", "register_array_tool", "unraid_array") + + @respx.mock + async def test_parity_status_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"array": {"parityCheckStatus": {"progress": 50, "speed": "100 MB/s", "errors": 0}}} + ) + 
) + tool = self._get_tool() + result = await tool(action="parity_status") + body = _extract_request_body(route.calls.last.request) + assert "GetParityStatus" in body["query"] + assert result["success"] is True + + @respx.mock + async def test_parity_start_sends_mutation(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"parityCheck": {"start": True}}) + ) + tool = self._get_tool() + result = await tool(action="parity_start") + body = _extract_request_body(route.calls.last.request) + assert "StartParityCheck" in body["query"] + assert result["success"] is True + + @respx.mock + async def test_parity_start_with_correct_sends_variable(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"parityCheck": {"start": True}}) + ) + tool = self._get_tool() + await tool(action="parity_start", correct=True) + body = _extract_request_body(route.calls.last.request) + assert body["variables"] == {"correct": True} + + @respx.mock + async def test_parity_pause_sends_mutation(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"parityCheck": {"pause": True}}) + ) + tool = self._get_tool() + await tool(action="parity_pause") + body = _extract_request_body(route.calls.last.request) + assert "PauseParityCheck" in body["query"] + + @respx.mock + async def test_parity_cancel_sends_mutation(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"parityCheck": {"cancel": True}}) + ) + tool = self._get_tool() + await tool(action="parity_cancel") + body = _extract_request_body(route.calls.last.request) + assert "CancelParityCheck" in body["query"] + + +# =========================================================================== +# Section 9: Storage tool request construction +# =========================================================================== + + +class TestStorageToolRequests: + """Verify unraid_storage tool constructs correct requests.""" + + 
@staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage") + + @respx.mock + async def test_shares_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"shares": [{"id": "s1", "name": "appdata"}]}) + ) + tool = self._get_tool() + result = await tool(action="shares") + body = _extract_request_body(route.calls.last.request) + assert "GetSharesInfo" in body["query"] + assert "shares" in result + + @respx.mock + async def test_disks_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"disks": [{"id": "d1", "device": "sda", "name": "Disk 1"}]}) + ) + tool = self._get_tool() + await tool(action="disks") + body = _extract_request_body(route.calls.last.request) + assert "ListPhysicalDisks" in body["query"] + + @respx.mock + async def test_disk_details_sends_variable(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"disk": {"id": "d1", "device": "sda", "name": "Disk 1", "serialNum": "SN123", "size": 1000000, "temperature": 35}} + ) + ) + tool = self._get_tool() + await tool(action="disk_details", disk_id="d1") + body = _extract_request_body(route.calls.last.request) + assert "GetDiskDetails" in body["query"] + assert body["variables"] == {"id": "d1"} + + @respx.mock + async def test_log_files_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"logFiles": [{"name": "syslog", "path": "/var/log/syslog"}]}) + ) + tool = self._get_tool() + result = await tool(action="log_files") + body = _extract_request_body(route.calls.last.request) + assert "ListLogFiles" in body["query"] + assert "log_files" in result + + @respx.mock + async def test_logs_sends_path_and_lines_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"logFile": {"path": "/var/log/syslog", "content": "log 
line", "totalLines": 100, "startLine": 1}} + ) + ) + tool = self._get_tool() + await tool(action="logs", log_path="/var/log/syslog", tail_lines=50) + body = _extract_request_body(route.calls.last.request) + assert "GetLogContent" in body["query"] + assert body["variables"]["path"] == "/var/log/syslog" + assert body["variables"]["lines"] == 50 + + @respx.mock + async def test_logs_rejects_path_traversal(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="log_path must start with"): + await tool(action="logs", log_path="/etc/shadow") + + @respx.mock + async def test_unassigned_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"unassignedDevices": []}) + ) + tool = self._get_tool() + result = await tool(action="unassigned") + body = _extract_request_body(route.calls.last.request) + assert "GetUnassignedDevices" in body["query"] + assert "devices" in result + + +# =========================================================================== +# Section 10: Notifications tool request construction +# =========================================================================== + + +class TestNotificationsToolRequests: + """Verify unraid_notifications tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn( + "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" + ) + + @respx.mock + async def test_overview_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"notifications": {"overview": {"unread": {"info": 1, "warning": 0, "alert": 0, "total": 1}}}} + ) + ) + tool = self._get_tool() + await tool(action="overview") + body = _extract_request_body(route.calls.last.request) + assert "GetNotificationsOverview" in body["query"] + + @respx.mock + async def test_list_sends_filter_variables(self) -> None: + route = respx.post(API_URL).mock( + 
return_value=_graphql_response({"notifications": {"list": []}}) + ) + tool = self._get_tool() + await tool(action="list", list_type="ARCHIVE", importance="WARNING", offset=5, limit=10) + body = _extract_request_body(route.calls.last.request) + assert "ListNotifications" in body["query"] + filt = body["variables"]["filter"] + assert filt["type"] == "ARCHIVE" + assert filt["importance"] == "WARNING" + assert filt["offset"] == 5 + assert filt["limit"] == 10 + + @respx.mock + async def test_warnings_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"notifications": {"warningsAndAlerts": []}}) + ) + tool = self._get_tool() + result = await tool(action="warnings") + body = _extract_request_body(route.calls.last.request) + assert "GetWarningsAndAlerts" in body["query"] + assert "warnings" in result + + @respx.mock + async def test_create_sends_input_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"notifications": {"createNotification": {"id": "n1", "title": "Test", "importance": "INFO"}}} + ) + ) + tool = self._get_tool() + await tool( + action="create", title="Test", subject="Sub", description="Desc", importance="info" + ) + body = _extract_request_body(route.calls.last.request) + assert "CreateNotification" in body["query"] + inp = body["variables"]["input"] + assert inp["title"] == "Test" + assert inp["subject"] == "Sub" + assert inp["importance"] == "INFO" # uppercased + + @respx.mock + async def test_archive_sends_id_variable(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"notifications": {"archiveNotification": True}}) + ) + tool = self._get_tool() + await tool(action="archive", notification_id="notif-1") + body = _extract_request_body(route.calls.last.request) + assert "ArchiveNotification" in body["query"] + assert body["variables"] == {"id": "notif-1"} + + @respx.mock + async def test_delete_requires_confirm(self) -> None: 
+ tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="delete", notification_id="n1", notification_type="UNREAD") + + @respx.mock + async def test_delete_sends_id_and_type(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"notifications": {"deleteNotification": True}}) + ) + tool = self._get_tool() + await tool( + action="delete", notification_id="n1", notification_type="unread", confirm=True + ) + body = _extract_request_body(route.calls.last.request) + assert "DeleteNotification" in body["query"] + assert body["variables"]["id"] == "n1" + assert body["variables"]["type"] == "UNREAD" # uppercased + + @respx.mock + async def test_archive_all_sends_importance_when_provided(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"notifications": {"archiveAll": True}}) + ) + tool = self._get_tool() + await tool(action="archive_all", importance="warning") + body = _extract_request_body(route.calls.last.request) + assert "ArchiveAllNotifications" in body["query"] + assert body["variables"]["importance"] == "WARNING" + + +# =========================================================================== +# Section 11: RClone tool request construction +# =========================================================================== + + +class TestRCloneToolRequests: + """Verify unraid_rclone tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone") + + @respx.mock + async def test_list_remotes_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"rclone": {"remotes": [{"name": "gdrive", "type": "drive"}]}} + ) + ) + tool = self._get_tool() + result = await tool(action="list_remotes") + body = _extract_request_body(route.calls.last.request) + assert "ListRCloneRemotes" in body["query"] + assert "remotes" in result 
+ + @respx.mock + async def test_config_form_sends_provider_type(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"rclone": {"configForm": {"id": "form1", "dataSchema": {}, "uiSchema": {}}}} + ) + ) + tool = self._get_tool() + await tool(action="config_form", provider_type="s3") + body = _extract_request_body(route.calls.last.request) + assert "GetRCloneConfigForm" in body["query"] + assert body["variables"]["formOptions"]["providerType"] == "s3" + + @respx.mock + async def test_create_remote_sends_input_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"rclone": {"createRCloneRemote": {"name": "my-s3", "type": "s3", "parameters": {}}}} + ) + ) + tool = self._get_tool() + await tool( + action="create_remote", + name="my-s3", + provider_type="s3", + config_data={"bucket": "my-bucket"}, + ) + body = _extract_request_body(route.calls.last.request) + assert "CreateRCloneRemote" in body["query"] + inp = body["variables"]["input"] + assert inp["name"] == "my-s3" + assert inp["type"] == "s3" + assert inp["config"] == {"bucket": "my-bucket"} + + @respx.mock + async def test_delete_remote_requires_confirm(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="delete_remote", name="old-remote") + + @respx.mock + async def test_delete_remote_sends_name_when_confirmed(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"rclone": {"deleteRCloneRemote": True}}) + ) + tool = self._get_tool() + result = await tool(action="delete_remote", name="old-remote", confirm=True) + body = _extract_request_body(route.calls.last.request) + assert "DeleteRCloneRemote" in body["query"] + assert body["variables"]["input"]["name"] == "old-remote" + assert result["success"] is True + + +# =========================================================================== +# Section 12: Users tool request construction +# 
=========================================================================== + + +class TestUsersToolRequests: + """Verify unraid_users tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.users", "register_users_tool", "unraid_users") + + @respx.mock + async def test_me_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"me": {"id": "u1", "name": "admin", "description": "Admin", "roles": ["admin"]}} + ) + ) + tool = self._get_tool() + result = await tool(action="me") + body = _extract_request_body(route.calls.last.request) + assert "GetMe" in body["query"] + assert result["name"] == "admin" + + +# =========================================================================== +# Section 13: Keys tool request construction +# =========================================================================== + + +class TestKeysToolRequests: + """Verify unraid_keys tool constructs correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys") + + @respx.mock + async def test_list_sends_correct_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"apiKeys": [{"id": "k1", "name": "my-key"}]}) + ) + tool = self._get_tool() + result = await tool(action="list") + body = _extract_request_body(route.calls.last.request) + assert "ListApiKeys" in body["query"] + assert "keys" in result + + @respx.mock + async def test_get_sends_id_variable(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"apiKey": {"id": "k1", "name": "my-key", "roles": ["admin"]}} + ) + ) + tool = self._get_tool() + await tool(action="get", key_id="k1") + body = _extract_request_body(route.calls.last.request) + assert "GetApiKey" in body["query"] + assert body["variables"] == {"id": "k1"} + + @respx.mock + async def 
test_create_sends_input_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"createApiKey": {"id": "k2", "name": "new-key", "key": "secret", "roles": ["read"]}} + ) + ) + tool = self._get_tool() + result = await tool(action="create", name="new-key", roles=["read"]) + body = _extract_request_body(route.calls.last.request) + assert "CreateApiKey" in body["query"] + inp = body["variables"]["input"] + assert inp["name"] == "new-key" + assert inp["roles"] == ["read"] + assert result["success"] is True + + @respx.mock + async def test_update_sends_input_variables(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response( + {"updateApiKey": {"id": "k1", "name": "renamed", "roles": ["admin"]}} + ) + ) + tool = self._get_tool() + await tool(action="update", key_id="k1", name="renamed") + body = _extract_request_body(route.calls.last.request) + assert "UpdateApiKey" in body["query"] + inp = body["variables"]["input"] + assert inp["id"] == "k1" + assert inp["name"] == "renamed" + + @respx.mock + async def test_delete_requires_confirm(self) -> None: + tool = self._get_tool() + with pytest.raises(ToolError, match="destructive"): + await tool(action="delete", key_id="k1") + + @respx.mock + async def test_delete_sends_ids_when_confirmed(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"deleteApiKeys": True}) + ) + tool = self._get_tool() + result = await tool(action="delete", key_id="k1", confirm=True) + body = _extract_request_body(route.calls.last.request) + assert "DeleteApiKeys" in body["query"] + assert body["variables"]["input"]["ids"] == ["k1"] + assert result["success"] is True + + +# =========================================================================== +# Section 14: Health tool request construction +# =========================================================================== + + +class TestHealthToolRequests: + """Verify unraid_health tool constructs 
correct requests.""" + + @staticmethod + def _get_tool(): + return make_tool_fn("unraid_mcp.tools.health", "register_health_tool", "unraid_health") + + @respx.mock + async def test_test_connection_sends_online_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({"online": True}) + ) + tool = self._get_tool() + result = await tool(action="test_connection") + body = _extract_request_body(route.calls.last.request) + assert "online" in body["query"] + assert result["status"] == "connected" + assert result["online"] is True + + @respx.mock + async def test_check_sends_comprehensive_query(self) -> None: + route = respx.post(API_URL).mock( + return_value=_graphql_response({ + "info": { + "machineId": "m1", + "time": 1234567890, + "versions": {"unraid": "7.0"}, + "os": {"uptime": 86400}, + }, + "array": {"state": "STARTED"}, + "notifications": {"overview": {"unread": {"alert": 0, "warning": 1, "total": 3}}}, + "docker": {"containers": [{"id": "c1", "state": "running", "status": "Up"}]}, + }) + ) + tool = self._get_tool() + result = await tool(action="check") + body = _extract_request_body(route.calls.last.request) + assert "ComprehensiveHealthCheck" in body["query"] + assert result["status"] == "healthy" + assert "api_latency_ms" in result + + @respx.mock + async def test_test_connection_measures_latency(self) -> None: + respx.post(API_URL).mock(return_value=_graphql_response({"online": True})) + tool = self._get_tool() + result = await tool(action="test_connection") + assert "latency_ms" in result + assert isinstance(result["latency_ms"], float) + + @respx.mock + async def test_check_reports_warning_on_alerts(self) -> None: + respx.post(API_URL).mock( + return_value=_graphql_response({ + "info": {"machineId": "m1", "time": 0, "versions": {"unraid": "7.0"}, "os": {"uptime": 0}}, + "array": {"state": "STARTED"}, + "notifications": {"overview": {"unread": {"alert": 3, "warning": 0, "total": 5}}}, + "docker": {"containers": []}, + }) + ) 
+ tool = self._get_tool() + result = await tool(action="check") + assert result["status"] == "warning" + assert any("alert" in issue for issue in result.get("issues", [])) + + +# =========================================================================== +# Section 15: Cross-cutting concerns +# =========================================================================== + + +class TestCrossCuttingConcerns: + """Verify behaviors that apply across multiple tools.""" + + @respx.mock + async def test_missing_api_url_raises_before_http_call(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({})) + with ( + patch("unraid_mcp.core.client.UNRAID_API_URL", ""), + pytest.raises(ToolError, match="UNRAID_API_URL not configured"), + ): + await make_graphql_request("query { online }") + assert not route.called # HTTP request should never be made + + @respx.mock + async def test_missing_api_key_raises_before_http_call(self) -> None: + route = respx.post(API_URL).mock(return_value=_graphql_response({})) + with ( + patch("unraid_mcp.core.client.UNRAID_API_KEY", ""), + pytest.raises(ToolError, match="UNRAID_API_KEY not configured"), + ): + await make_graphql_request("query { online }") + assert not route.called + + @respx.mock + async def test_tool_error_from_http_layer_propagates(self) -> None: + """When an HTTP error occurs, the ToolError bubbles up through the tool.""" + respx.post(API_URL).mock(return_value=httpx.Response(500, text="Server Error")) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + with pytest.raises(ToolError, match="HTTP error 500"): + await tool(action="online") + + @respx.mock + async def test_network_error_propagates_through_tool(self) -> None: + """When a network error occurs, the ToolError bubbles up through the tool.""" + respx.post(API_URL).mock(side_effect=httpx.ConnectError("Connection refused")) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + with 
pytest.raises(ToolError, match="Network connection error"): + await tool(action="online") + + @respx.mock + async def test_graphql_error_propagates_through_tool(self) -> None: + """When a GraphQL error occurs, the ToolError bubbles up through the tool.""" + respx.post(API_URL).mock( + return_value=_graphql_response(errors=[{"message": "Permission denied"}]) + ) + tool = make_tool_fn("unraid_mcp.tools.info", "register_info_tool", "unraid_info") + with pytest.raises(ToolError, match="Permission denied"): + await tool(action="online") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/test_subscriptions.py b/tests/integration/test_subscriptions.py new file mode 100644 index 0000000..df697cd --- /dev/null +++ b/tests/integration/test_subscriptions.py @@ -0,0 +1,1010 @@ +"""Integration tests for WebSocket subscription lifecycle and reconnection logic. + +These tests validate the SubscriptionManager's connection lifecycle, +reconnection with exponential backoff, protocol handling, and resource +data management without requiring a live Unraid server. +""" + +import asyncio +import json +from datetime import datetime +from typing import Any +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from unraid_mcp.subscriptions.manager import SubscriptionManager + +pytestmark = pytest.mark.integration + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_ws_mock( + recv_messages: list[str | dict[str, Any]] | None = None, + subprotocol: str = "graphql-transport-ws", +) -> AsyncMock: + """Build an AsyncMock that behaves like a websockets connection. + + Args: + recv_messages: Ordered list of messages ``recv()`` should return. + Dicts are auto-serialised to JSON strings. + subprotocol: The negotiated subprotocol value. 
+ """ + ws = AsyncMock() + ws.subprotocol = subprotocol + + if recv_messages is None: + recv_messages = [{"type": "connection_ack"}] + + serialised: list[str] = [ + json.dumps(m) if isinstance(m, dict) else m for m in recv_messages + ] + ws.recv = AsyncMock(side_effect=serialised) + ws.send = AsyncMock() + + # Support ``async for message in websocket:`` + # After recv() values are exhausted we raise StopAsyncIteration. + ws.__aiter__ = MagicMock(return_value=ws) + ws.__anext__ = AsyncMock(side_effect=serialised[1:] + [StopAsyncIteration()]) + + return ws + + +def _ws_context(ws_mock: AsyncMock) -> AsyncMock: + """Wrap *ws_mock* so ``async with websockets.connect(...) as ws:`` works.""" + ctx = AsyncMock() + ctx.__aenter__ = AsyncMock(return_value=ws_mock) + ctx.__aexit__ = AsyncMock(return_value=False) + return ctx + + +SAMPLE_QUERY = "subscription { test { value } }" + + +# --------------------------------------------------------------------------- +# SubscriptionManager Initialisation +# --------------------------------------------------------------------------- + +class TestSubscriptionManagerInit: + """Tests for SubscriptionManager constructor and defaults.""" + + def test_default_state(self) -> None: + mgr = SubscriptionManager() + assert mgr.active_subscriptions == {} + assert mgr.resource_data == {} + assert mgr.websocket is None + + def test_default_auto_start_enabled(self) -> None: + mgr = SubscriptionManager() + assert mgr.auto_start_enabled is True + + @patch.dict("os.environ", {"UNRAID_AUTO_START_SUBSCRIPTIONS": "false"}) + def test_auto_start_disabled_via_env(self) -> None: + mgr = SubscriptionManager() + assert mgr.auto_start_enabled is False + + def test_default_max_reconnect_attempts(self) -> None: + mgr = SubscriptionManager() + assert mgr.max_reconnect_attempts == 10 + + @patch.dict("os.environ", {"UNRAID_MAX_RECONNECT_ATTEMPTS": "5"}) + def test_custom_max_reconnect_attempts(self) -> None: + mgr = SubscriptionManager() + assert 
mgr.max_reconnect_attempts == 5 + + def test_subscription_configs_contain_log_file(self) -> None: + mgr = SubscriptionManager() + assert "logFileSubscription" in mgr.subscription_configs + + def test_log_file_subscription_not_auto_start(self) -> None: + mgr = SubscriptionManager() + cfg = mgr.subscription_configs["logFileSubscription"] + assert cfg.get("auto_start") is False + + +# --------------------------------------------------------------------------- +# Connection Lifecycle +# --------------------------------------------------------------------------- + +class TestConnectionLifecycle: + """Tests for connect -> subscribe -> receive -> disconnect flow.""" + + @pytest.mark.asyncio + async def test_start_subscription_creates_task(self) -> None: + mgr = SubscriptionManager() + ws = _make_ws_mock() + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "test-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + await mgr.start_subscription("test_sub", SAMPLE_QUERY) + assert "test_sub" in mgr.active_subscriptions + assert isinstance(mgr.active_subscriptions["test_sub"], asyncio.Task) + # Cleanup + await mgr.stop_subscription("test_sub") + + @pytest.mark.asyncio + async def test_duplicate_start_is_noop(self) -> None: + mgr = SubscriptionManager() + ws = _make_ws_mock() + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "test-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + await mgr.start_subscription("test_sub", SAMPLE_QUERY) + first_task = 
mgr.active_subscriptions["test_sub"] + # Second start should be a no-op + await mgr.start_subscription("test_sub", SAMPLE_QUERY) + assert mgr.active_subscriptions["test_sub"] is first_task + await mgr.stop_subscription("test_sub") + + @pytest.mark.asyncio + async def test_stop_subscription_cancels_task(self) -> None: + mgr = SubscriptionManager() + ws = _make_ws_mock() + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "test-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + await mgr.start_subscription("test_sub", SAMPLE_QUERY) + assert "test_sub" in mgr.active_subscriptions + await mgr.stop_subscription("test_sub") + assert "test_sub" not in mgr.active_subscriptions + assert mgr.connection_states.get("test_sub") == "stopped" + + @pytest.mark.asyncio + async def test_stop_nonexistent_subscription_is_safe(self) -> None: + mgr = SubscriptionManager() + # Should not raise + await mgr.stop_subscription("nonexistent") + + @pytest.mark.asyncio + async def test_connection_state_transitions(self) -> None: + """Verify state goes through starting -> active during start_subscription.""" + mgr = SubscriptionManager() + ws = _make_ws_mock() + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "test-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + await mgr.start_subscription("test_sub", SAMPLE_QUERY) + # After start_subscription returns, state should be "active" + assert mgr.connection_states["test_sub"] == "active" + await mgr.stop_subscription("test_sub") + + +# 
--------------------------------------------------------------------------- +# Protocol Handling +# --------------------------------------------------------------------------- + +class TestProtocolHandling: + """Tests for GraphQL-WS protocol message handling inside _subscription_loop.""" + + @pytest.mark.asyncio + async def test_connection_init_sends_auth(self) -> None: + """Verify connection_init includes X-API-Key header.""" + mgr = SubscriptionManager() + + data_msg = {"type": "next", "id": "test_sub", "payload": {"data": {"test": "value"}}} + complete_msg = {"type": "complete", "id": "test_sub"} + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + data_msg, + complete_msg, + ] + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "my-secret-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + # Run the loop directly (will break on "complete" message) + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + # First send call should be connection_init + first_send = ws.send.call_args_list[0] + init_msg = json.loads(first_send[0][0]) + assert init_msg["type"] == "connection_init" + assert init_msg["payload"]["headers"]["X-API-Key"] == "my-secret-key" + + @pytest.mark.asyncio + async def test_subscribe_message_uses_correct_type_for_transport_ws(self) -> None: + """graphql-transport-ws should use 'subscribe' type, not 'start'.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", 
return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + # Second send is the subscription message + sub_send = ws.send.call_args_list[1] + sub_msg = json.loads(sub_send[0][0]) + assert sub_msg["type"] == "subscribe" + assert sub_msg["id"] == "test_sub" + + @pytest.mark.asyncio + async def test_subscribe_message_uses_start_for_graphql_ws(self) -> None: + """Legacy graphql-ws protocol should use 'start' type.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + sub_send = ws.send.call_args_list[1] + sub_msg = json.loads(sub_send[0][0]) + assert sub_msg["type"] == "start" + + @pytest.mark.asyncio + async def test_connection_error_sets_auth_failed_state(self) -> None: + """connection_error response should break the loop and set auth_failed.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_error", "payload": {"message": "Invalid API key"}}, + ] + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + 
patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "bad-key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.connection_states["test_sub"] == "auth_failed" + assert "Authentication error" in mgr.last_error["test_sub"] + + @pytest.mark.asyncio + async def test_no_api_key_still_sends_init_without_payload(self) -> None: + """When no API key is set, connection_init should omit the payload.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "complete", "id": "test_sub"}, + ] + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", ""), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + first_send = ws.send.call_args_list[0] + init_msg = json.loads(first_send[0][0]) + assert init_msg["type"] == "connection_init" + assert "payload" not in init_msg + + +# --------------------------------------------------------------------------- +# Data Reception +# --------------------------------------------------------------------------- + +class TestDataReception: + """Tests for receiving and storing subscription data.""" + + @pytest.mark.asyncio + async def test_next_message_stores_resource_data(self) -> None: + """A 'next' message with data should populate resource_data.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": 
"connection_ack"}, + { + "type": "next", + "id": "test_sub", + "payload": {"data": {"test": {"value": 42}}}, + }, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert "test_sub" in mgr.resource_data + assert mgr.resource_data["test_sub"].data == {"test": {"value": 42}} + assert mgr.resource_data["test_sub"].subscription_type == "test_sub" + + @pytest.mark.asyncio + async def test_data_message_for_legacy_protocol(self) -> None: + """Legacy graphql-ws uses 'data' type instead of 'next'.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + { + "type": "data", + "id": "test_sub", + "payload": {"data": {"legacy": True}}, + }, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert "test_sub" in mgr.resource_data + assert mgr.resource_data["test_sub"].data == {"legacy": True} + + @pytest.mark.asyncio + async def test_graphql_errors_tracked_in_last_error(self) -> None: + 
"""GraphQL errors in payload should be recorded in last_error.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + { + "type": "next", + "id": "test_sub", + "payload": {"errors": [{"message": "Field not found"}]}, + }, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert "GraphQL errors" in mgr.last_error.get("test_sub", "") + + @pytest.mark.asyncio + async def test_ping_receives_pong_response(self) -> None: + """Server ping should trigger pong response.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "ping"}, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + # Find the pong send among all sends + pong_sent = False + for call in ws.send.call_args_list: + msg = json.loads(call[0][0]) + if msg.get("type") == "pong": + pong_sent = True + break + assert pong_sent, "Expected pong response to be 
sent" + + @pytest.mark.asyncio + async def test_error_message_sets_error_state(self) -> None: + """An 'error' type message should set connection state to error.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "error", "id": "test_sub", "payload": {"message": "bad query"}}, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.connection_states["test_sub"] in ("error", "completed") + assert "Subscription error" in mgr.last_error.get("test_sub", "") + + @pytest.mark.asyncio + async def test_complete_message_breaks_loop(self) -> None: + """A 'complete' message should end the message loop.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.connection_states["test_sub"] in ("completed", "max_retries_exceeded") + + @pytest.mark.asyncio + 
async def test_mismatched_id_ignored(self) -> None: + """A data message with a different subscription id should not store data.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + { + "type": "next", + "id": "other_sub", + "payload": {"data": {"wrong": True}}, + }, + {"type": "complete", "id": "test_sub"}, + ], + subprotocol="graphql-transport-ws", + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + ): + mgr.reconnect_attempts["test_sub"] = 0 + mgr.max_reconnect_attempts = 1 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert "test_sub" not in mgr.resource_data + + +# --------------------------------------------------------------------------- +# Reconnection and Backoff +# --------------------------------------------------------------------------- + +class TestReconnection: + """Tests for reconnection logic and exponential backoff.""" + + @pytest.mark.asyncio + async def test_max_retries_exceeded_stops_loop(self) -> None: + """Loop should stop when max_reconnect_attempts is exceeded.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 2 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("refused")) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", new_callable=AsyncMock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + 
assert mgr.connection_states["test_sub"] == "max_retries_exceeded" + assert mgr.reconnect_attempts["test_sub"] > mgr.max_reconnect_attempts + + @pytest.mark.asyncio + async def test_backoff_delay_increases(self) -> None: + """Each retry should increase the backoff delay.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 3 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("refused")) + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + # Verify increasing delays: initial=5, then 5*1.5=7.5, then 7.5*1.5=11.25 + delays = [call[0][0] for call in sleep_mock.call_args_list] + assert len(delays) >= 2 + for i in range(1, len(delays)): + assert delays[i] > delays[i - 1], ( + f"Delay should increase: {delays[i]} > {delays[i-1]}" + ) + + @pytest.mark.asyncio + async def test_backoff_capped_at_max(self) -> None: + """Backoff delay should not exceed 300 seconds (5 minutes).""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 50 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("refused")) + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + delays = [call[0][0] for 
call in sleep_mock.call_args_list] + for d in delays: + assert d <= 300, f"Delay {d} exceeds max of 300 seconds" + + @pytest.mark.asyncio + async def test_successful_connection_resets_retry_count(self) -> None: + """A successful connection should reset reconnect_attempts to 0.""" + mgr = SubscriptionManager() + + ws = _make_ws_mock( + recv_messages=[ + {"type": "connection_ack"}, + {"type": "complete", "id": "test_sub"}, + ], + ) + ctx = _ws_context(ws) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", return_value=ctx), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", new_callable=AsyncMock), + ): + # Pre-set a high attempt count + mgr.reconnect_attempts["test_sub"] = 5 + mgr.max_reconnect_attempts = 10 + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + # After successful connection, attempts should have been reset to 0 + # (it increments again on the next iteration, but the reset happens on connect) + # The key check is that it didn't immediately bail due to max retries + assert mgr.connection_states["test_sub"] != "max_retries_exceeded" + + @pytest.mark.asyncio + async def test_invalid_uri_does_not_retry(self) -> None: + """InvalidURI errors should break the loop without retrying.""" + import websockets.exceptions + + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 5 + + connect_mock = AsyncMock( + side_effect=websockets.exceptions.InvalidURI("bad://url", "Invalid URI") + ) + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + 
patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.connection_states["test_sub"] == "invalid_uri" + # Should not have retried + sleep_mock.assert_not_called() + + @pytest.mark.asyncio + async def test_timeout_error_triggers_reconnect(self) -> None: + """Timeout errors should trigger reconnection with backoff.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 2 + + connect_mock = AsyncMock(side_effect=TimeoutError("connection timeout")) + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.last_error["test_sub"] == "Connection or authentication timeout" + assert sleep_mock.call_count >= 1 + + @pytest.mark.asyncio + async def test_connection_closed_triggers_reconnect(self) -> None: + """ConnectionClosed errors should trigger reconnection.""" + import websockets.exceptions + from websockets.frames import Close + + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 2 + + connect_mock = AsyncMock( + side_effect=websockets.exceptions.ConnectionClosed( + Close(1006, "abnormal"), None + ) + ) + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://test.local"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", 
return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert "WebSocket connection closed" in mgr.last_error.get("test_sub", "") + assert mgr.connection_states["test_sub"] in ("disconnected", "max_retries_exceeded") + + +# --------------------------------------------------------------------------- +# WebSocket URL Construction +# --------------------------------------------------------------------------- + +class TestWebSocketURLConstruction: + """Tests for HTTP-to-WS URL conversion logic.""" + + @pytest.mark.asyncio + async def test_https_converted_to_wss(self) -> None: + """https:// URL should become wss://.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 1 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("test")) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "https://myserver.local:31337"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", new_callable=AsyncMock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + url_arg = connect_mock.call_args[0][0] + assert url_arg.startswith("wss://") + assert url_arg.endswith("/graphql") + + @pytest.mark.asyncio + async def test_http_converted_to_ws(self) -> None: + """http:// URL should become ws://.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 1 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("test")) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", "http://192.168.1.100:8080"), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + 
patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", new_callable=AsyncMock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + url_arg = connect_mock.call_args[0][0] + assert url_arg.startswith("ws://") + assert url_arg.endswith("/graphql") + + @pytest.mark.asyncio + async def test_no_api_url_raises_value_error(self) -> None: + """Missing UNRAID_API_URL should raise ValueError and stop.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 1 + + sleep_mock = AsyncMock() + + with ( + patch("unraid_mcp.subscriptions.manager.UNRAID_API_URL", ""), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", sleep_mock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + assert mgr.connection_states["test_sub"] in ("error", "max_retries_exceeded") + + @pytest.mark.asyncio + async def test_graphql_suffix_not_duplicated(self) -> None: + """URL already ending in /graphql should not get it appended again.""" + mgr = SubscriptionManager() + mgr.max_reconnect_attempts = 1 + + connect_mock = AsyncMock(side_effect=ConnectionRefusedError("test")) + + with ( + patch("unraid_mcp.subscriptions.manager.websockets.connect", connect_mock), + patch( + "unraid_mcp.subscriptions.manager.UNRAID_API_URL", + "https://myserver.local/graphql", + ), + patch("unraid_mcp.subscriptions.manager.UNRAID_API_KEY", "key"), + patch("unraid_mcp.subscriptions.manager.build_ws_ssl_context", return_value=None), + patch("unraid_mcp.subscriptions.manager.asyncio.sleep", new_callable=AsyncMock), + ): + await mgr._subscription_loop("test_sub", SAMPLE_QUERY, {}) + + url_arg = connect_mock.call_args[0][0] + assert url_arg == "wss://myserver.local/graphql" + assert "/graphql/graphql" not in url_arg + + +# --------------------------------------------------------------------------- +# Resource Data Access +# 
--------------------------------------------------------------------------- + +class TestResourceData: + """Tests for get_resource_data and list_active_subscriptions.""" + + def test_get_resource_data_returns_none_when_empty(self) -> None: + mgr = SubscriptionManager() + assert mgr.get_resource_data("nonexistent") is None + + def test_get_resource_data_returns_stored_data(self) -> None: + from unraid_mcp.core.types import SubscriptionData + + mgr = SubscriptionManager() + mgr.resource_data["test"] = SubscriptionData( + data={"key": "value"}, + last_updated=datetime.now(), + subscription_type="test", + ) + result = mgr.get_resource_data("test") + assert result == {"key": "value"} + + def test_list_active_subscriptions_empty(self) -> None: + mgr = SubscriptionManager() + assert mgr.list_active_subscriptions() == [] + + def test_list_active_subscriptions_returns_names(self) -> None: + mgr = SubscriptionManager() + # Simulate active subscriptions + mgr.active_subscriptions["sub_a"] = MagicMock() + mgr.active_subscriptions["sub_b"] = MagicMock() + result = mgr.list_active_subscriptions() + assert sorted(result) == ["sub_a", "sub_b"] + + +# --------------------------------------------------------------------------- +# Subscription Status Diagnostics +# --------------------------------------------------------------------------- + +class TestSubscriptionStatus: + """Tests for get_subscription_status diagnostic output.""" + + def test_status_includes_all_configured_subscriptions(self) -> None: + mgr = SubscriptionManager() + status = mgr.get_subscription_status() + for name in mgr.subscription_configs: + assert name in status + + def test_status_default_connection_state(self) -> None: + mgr = SubscriptionManager() + status = mgr.get_subscription_status() + for sub_status in status.values(): + assert sub_status["runtime"]["connection_state"] == "not_started" + + def test_status_shows_active_flag(self) -> None: + mgr = SubscriptionManager() + 
mgr.active_subscriptions["logFileSubscription"] = MagicMock() + status = mgr.get_subscription_status() + assert status["logFileSubscription"]["runtime"]["active"] is True + + def test_status_shows_data_availability(self) -> None: + from unraid_mcp.core.types import SubscriptionData + + mgr = SubscriptionManager() + mgr.resource_data["logFileSubscription"] = SubscriptionData( + data={"log": "content"}, + last_updated=datetime.now(), + subscription_type="logFileSubscription", + ) + status = mgr.get_subscription_status() + assert status["logFileSubscription"]["data"]["available"] is True + + def test_status_shows_error_info(self) -> None: + mgr = SubscriptionManager() + mgr.last_error["logFileSubscription"] = "Test error message" + status = mgr.get_subscription_status() + assert status["logFileSubscription"]["runtime"]["last_error"] == "Test error message" + + +# --------------------------------------------------------------------------- +# Auto-Start +# --------------------------------------------------------------------------- + +class TestAutoStart: + """Tests for auto_start_all_subscriptions.""" + + @pytest.mark.asyncio + async def test_auto_start_disabled_skips_all(self) -> None: + mgr = SubscriptionManager() + mgr.auto_start_enabled = False + # Should return without starting anything + await mgr.auto_start_all_subscriptions() + assert mgr.active_subscriptions == {} + + @pytest.mark.asyncio + async def test_auto_start_only_starts_marked_subscriptions(self) -> None: + """Only subscriptions with auto_start=True should be started.""" + mgr = SubscriptionManager() + # logFileSubscription has auto_start=False by default + with patch.object(mgr, "start_subscription", new_callable=AsyncMock) as mock_start: + await mgr.auto_start_all_subscriptions() + # logFileSubscription is auto_start=False, so no calls + mock_start.assert_not_called() + + @pytest.mark.asyncio + async def test_auto_start_handles_failure_gracefully(self) -> None: + """Failed auto-starts should log the 
error but not crash.""" + mgr = SubscriptionManager() + # Add a config that should auto-start + mgr.subscription_configs["test_auto"] = { + "query": "subscription { test }", + "resource": "unraid://test", + "description": "Test auto-start", + "auto_start": True, + } + + with patch.object( + mgr, "start_subscription", new_callable=AsyncMock, side_effect=RuntimeError("fail") + ): + # Should not raise + await mgr.auto_start_all_subscriptions() + assert "fail" in mgr.last_error.get("test_auto", "") + + +# --------------------------------------------------------------------------- +# SSL Context (via utils) +# --------------------------------------------------------------------------- + +class TestSSLContext: + """Tests for build_ws_ssl_context utility.""" + + def test_non_wss_returns_none(self) -> None: + from unraid_mcp.subscriptions.utils import build_ws_ssl_context + + assert build_ws_ssl_context("ws://localhost:8080/graphql") is None + + def test_wss_with_verify_true_returns_default_context(self) -> None: + import ssl + + from unraid_mcp.subscriptions.utils import build_ws_ssl_context + + with patch("unraid_mcp.subscriptions.utils.UNRAID_VERIFY_SSL", True): + ctx = build_ws_ssl_context("wss://test.local/graphql") + assert isinstance(ctx, ssl.SSLContext) + assert ctx.check_hostname is True + + def test_wss_with_verify_false_disables_verification(self) -> None: + import ssl + + from unraid_mcp.subscriptions.utils import build_ws_ssl_context + + with patch("unraid_mcp.subscriptions.utils.UNRAID_VERIFY_SSL", False): + ctx = build_ws_ssl_context("wss://test.local/graphql") + assert isinstance(ctx, ssl.SSLContext) + assert ctx.check_hostname is False + assert ctx.verify_mode == ssl.CERT_NONE + + def test_wss_with_ca_bundle_path(self) -> None: + import ssl + + from unraid_mcp.subscriptions.utils import build_ws_ssl_context + + with ( + patch("unraid_mcp.subscriptions.utils.UNRAID_VERIFY_SSL", "/path/to/ca-bundle.crt"), + patch("ssl.create_default_context") as mock_ctx, + 
): + build_ws_ssl_context("wss://test.local/graphql") + mock_ctx.assert_called_once_with(cafile="/path/to/ca-bundle.crt") diff --git a/tests/safety/__init__.py b/tests/safety/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/safety/test_destructive_guards.py b/tests/safety/test_destructive_guards.py new file mode 100644 index 0000000..14d66f3 --- /dev/null +++ b/tests/safety/test_destructive_guards.py @@ -0,0 +1,324 @@ +"""Safety audit tests for destructive action confirmation guards. + +Verifies that all destructive operations across every tool require +explicit `confirm=True` before execution, and that the DESTRUCTIVE_ACTIONS +registries are complete and consistent. +""" + +from collections.abc import Generator +from unittest.mock import AsyncMock, patch + +import pytest + +from unraid_mcp.core.exceptions import ToolError + +# Import DESTRUCTIVE_ACTIONS sets from every tool module that defines one +from unraid_mcp.tools.docker import DESTRUCTIVE_ACTIONS as DOCKER_DESTRUCTIVE +from unraid_mcp.tools.docker import MUTATIONS as DOCKER_MUTATIONS +from unraid_mcp.tools.keys import DESTRUCTIVE_ACTIONS as KEYS_DESTRUCTIVE +from unraid_mcp.tools.keys import MUTATIONS as KEYS_MUTATIONS +from unraid_mcp.tools.notifications import DESTRUCTIVE_ACTIONS as NOTIF_DESTRUCTIVE +from unraid_mcp.tools.notifications import MUTATIONS as NOTIF_MUTATIONS +from unraid_mcp.tools.rclone import DESTRUCTIVE_ACTIONS as RCLONE_DESTRUCTIVE +from unraid_mcp.tools.rclone import MUTATIONS as RCLONE_MUTATIONS +from unraid_mcp.tools.virtualization import DESTRUCTIVE_ACTIONS as VM_DESTRUCTIVE +from unraid_mcp.tools.virtualization import MUTATIONS as VM_MUTATIONS + +# Centralized import for make_tool_fn helper +# conftest.py sits in tests/ and is importable without __init__.py +from conftest import make_tool_fn + + +# --------------------------------------------------------------------------- +# Known destructive actions registry (ground truth for this audit) +# 
--------------------------------------------------------------------------- + +# Every destructive action in the codebase, keyed by (tool_module, tool_name) +KNOWN_DESTRUCTIVE: dict[str, dict[str, set[str]]] = { + "docker": { + "module": "unraid_mcp.tools.docker", + "register_fn": "register_docker_tool", + "tool_name": "unraid_docker", + "actions": {"remove"}, + "runtime_set": DOCKER_DESTRUCTIVE, + }, + "vm": { + "module": "unraid_mcp.tools.virtualization", + "register_fn": "register_vm_tool", + "tool_name": "unraid_vm", + "actions": {"force_stop", "reset"}, + "runtime_set": VM_DESTRUCTIVE, + }, + "notifications": { + "module": "unraid_mcp.tools.notifications", + "register_fn": "register_notifications_tool", + "tool_name": "unraid_notifications", + "actions": {"delete", "delete_archived"}, + "runtime_set": NOTIF_DESTRUCTIVE, + }, + "rclone": { + "module": "unraid_mcp.tools.rclone", + "register_fn": "register_rclone_tool", + "tool_name": "unraid_rclone", + "actions": {"delete_remote"}, + "runtime_set": RCLONE_DESTRUCTIVE, + }, + "keys": { + "module": "unraid_mcp.tools.keys", + "register_fn": "register_keys_tool", + "tool_name": "unraid_keys", + "actions": {"delete"}, + "runtime_set": KEYS_DESTRUCTIVE, + }, +} + + +# --------------------------------------------------------------------------- +# Registry validation: DESTRUCTIVE_ACTIONS sets match ground truth +# --------------------------------------------------------------------------- + + +class TestDestructiveActionRegistries: + """Verify that DESTRUCTIVE_ACTIONS sets in source code match the audit.""" + + @pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys())) + def test_destructive_set_matches_audit(self, tool_key: str) -> None: + """Each tool's DESTRUCTIVE_ACTIONS must exactly match the audited set.""" + info = KNOWN_DESTRUCTIVE[tool_key] + assert info["runtime_set"] == info["actions"], ( + f"{tool_key}: DESTRUCTIVE_ACTIONS is {info['runtime_set']}, " + f"expected {info['actions']}" + ) + + 
@pytest.mark.parametrize("tool_key", list(KNOWN_DESTRUCTIVE.keys())) + def test_destructive_actions_are_valid_mutations(self, tool_key: str) -> None: + """Every destructive action must correspond to an actual mutation.""" + info = KNOWN_DESTRUCTIVE[tool_key] + mutations_map = { + "docker": DOCKER_MUTATIONS, + "vm": VM_MUTATIONS, + "notifications": NOTIF_MUTATIONS, + "rclone": RCLONE_MUTATIONS, + "keys": KEYS_MUTATIONS, + } + mutations = mutations_map[tool_key] + for action in info["actions"]: + assert action in mutations, ( + f"{tool_key}: destructive action '{action}' is not in MUTATIONS" + ) + + def test_no_delete_or_remove_mutations_missing_from_destructive(self) -> None: + """Any mutation with 'delete' or 'remove' in its name should be destructive.""" + all_mutations = { + "docker": DOCKER_MUTATIONS, + "vm": VM_MUTATIONS, + "notifications": NOTIF_MUTATIONS, + "rclone": RCLONE_MUTATIONS, + "keys": KEYS_MUTATIONS, + } + all_destructive = { + "docker": DOCKER_DESTRUCTIVE, + "vm": VM_DESTRUCTIVE, + "notifications": NOTIF_DESTRUCTIVE, + "rclone": RCLONE_DESTRUCTIVE, + "keys": KEYS_DESTRUCTIVE, + } + missing: list[str] = [] + for tool_key, mutations in all_mutations.items(): + destructive = all_destructive[tool_key] + for action_name in mutations: + if ("delete" in action_name or "remove" in action_name) and action_name not in destructive: + missing.append(f"{tool_key}/{action_name}") + assert not missing, ( + f"Mutations with 'delete'/'remove' not in DESTRUCTIVE_ACTIONS: {missing}" + ) + + +# --------------------------------------------------------------------------- +# Confirmation guard tests: calling without confirm=True raises ToolError +# --------------------------------------------------------------------------- + +# Build parametrized test cases: (tool_key, action, kwargs_without_confirm) +# Each destructive action needs the minimum required params (minus confirm) +_DESTRUCTIVE_TEST_CASES: list[tuple[str, str, dict]] = [ + # Docker + ("docker", "remove", 
{"container_id": "abc123"}), + # VM + ("vm", "force_stop", {"vm_id": "test-vm-uuid"}), + ("vm", "reset", {"vm_id": "test-vm-uuid"}), + # Notifications + ("notifications", "delete", {"notification_id": "notif-1", "notification_type": "UNREAD"}), + ("notifications", "delete_archived", {}), + # RClone + ("rclone", "delete_remote", {"name": "my-remote"}), + # Keys + ("keys", "delete", {"key_id": "key-123"}), +] + + +_CASE_IDS = [f"{c[0]}/{c[1]}" for c in _DESTRUCTIVE_TEST_CASES] + + +@pytest.fixture +def _mock_docker_graphql() -> Generator[AsyncMock, None, None]: + with patch("unraid_mcp.tools.docker.make_graphql_request", new_callable=AsyncMock) as m: + yield m + + +@pytest.fixture +def _mock_vm_graphql() -> Generator[AsyncMock, None, None]: + with patch("unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock) as m: + yield m + + +@pytest.fixture +def _mock_notif_graphql() -> Generator[AsyncMock, None, None]: + with patch("unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock) as m: + yield m + + +@pytest.fixture +def _mock_rclone_graphql() -> Generator[AsyncMock, None, None]: + with patch("unraid_mcp.tools.rclone.make_graphql_request", new_callable=AsyncMock) as m: + yield m + + +@pytest.fixture +def _mock_keys_graphql() -> Generator[AsyncMock, None, None]: + with patch("unraid_mcp.tools.keys.make_graphql_request", new_callable=AsyncMock) as m: + yield m + + +# Map tool_key -> (fixture name, module path, register fn, tool name) +_TOOL_REGISTRY = { + "docker": ("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker"), + "vm": ("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm"), + "notifications": ("unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications"), + "rclone": ("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone"), + "keys": ("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys"), +} + + +class TestConfirmationGuards: + """Every 
destructive action must reject calls without confirm=True.""" + + @pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS) + async def test_rejects_without_confirm( + self, + tool_key: str, + action: str, + kwargs: dict, + _mock_docker_graphql: AsyncMock, + _mock_vm_graphql: AsyncMock, + _mock_notif_graphql: AsyncMock, + _mock_rclone_graphql: AsyncMock, + _mock_keys_graphql: AsyncMock, + ) -> None: + """Calling a destructive action without confirm=True must raise ToolError.""" + module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key] + tool_fn = make_tool_fn(module_path, register_fn, tool_name) + + with pytest.raises(ToolError, match="confirm=True"): + await tool_fn(action=action, **kwargs) + + @pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS) + async def test_rejects_with_confirm_false( + self, + tool_key: str, + action: str, + kwargs: dict, + _mock_docker_graphql: AsyncMock, + _mock_vm_graphql: AsyncMock, + _mock_notif_graphql: AsyncMock, + _mock_rclone_graphql: AsyncMock, + _mock_keys_graphql: AsyncMock, + ) -> None: + """Explicitly passing confirm=False must still raise ToolError.""" + module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key] + tool_fn = make_tool_fn(module_path, register_fn, tool_name) + + with pytest.raises(ToolError, match="destructive"): + await tool_fn(action=action, confirm=False, **kwargs) + + @pytest.mark.parametrize("tool_key,action,kwargs", _DESTRUCTIVE_TEST_CASES, ids=_CASE_IDS) + async def test_error_message_includes_action_name( + self, + tool_key: str, + action: str, + kwargs: dict, + _mock_docker_graphql: AsyncMock, + _mock_vm_graphql: AsyncMock, + _mock_notif_graphql: AsyncMock, + _mock_rclone_graphql: AsyncMock, + _mock_keys_graphql: AsyncMock, + ) -> None: + """The error message should include the action name for clarity.""" + module_path, register_fn, tool_name = _TOOL_REGISTRY[tool_key] + tool_fn = make_tool_fn(module_path, register_fn, 
tool_name) + + with pytest.raises(ToolError, match=action): + await tool_fn(action=action, **kwargs) + + +# --------------------------------------------------------------------------- +# Positive tests: destructive actions proceed when confirm=True +# --------------------------------------------------------------------------- + + +class TestConfirmAllowsExecution: + """Destructive actions with confirm=True should reach the GraphQL layer.""" + + async def test_docker_remove_with_confirm(self, _mock_docker_graphql: AsyncMock) -> None: + cid = "a" * 64 + ":local" + _mock_docker_graphql.side_effect = [ + {"docker": {"containers": [{"id": cid, "names": ["old-app"]}]}}, + {"docker": {"removeContainer": True}}, + ] + tool_fn = make_tool_fn("unraid_mcp.tools.docker", "register_docker_tool", "unraid_docker") + result = await tool_fn(action="remove", container_id="old-app", confirm=True) + assert result["success"] is True + + async def test_vm_force_stop_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None: + _mock_vm_graphql.return_value = {"vm": {"forceStop": True}} + tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm") + result = await tool_fn(action="force_stop", vm_id="test-uuid", confirm=True) + assert result["success"] is True + + async def test_vm_reset_with_confirm(self, _mock_vm_graphql: AsyncMock) -> None: + _mock_vm_graphql.return_value = {"vm": {"reset": True}} + tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm") + result = await tool_fn(action="reset", vm_id="test-uuid", confirm=True) + assert result["success"] is True + + async def test_notifications_delete_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: + _mock_notif_graphql.return_value = {"notifications": {"deleteNotification": True}} + tool_fn = make_tool_fn( + "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" + ) + result = await tool_fn( + action="delete", + 
notification_id="notif-1", + notification_type="UNREAD", + confirm=True, + ) + assert result["success"] is True + + async def test_notifications_delete_archived_with_confirm(self, _mock_notif_graphql: AsyncMock) -> None: + _mock_notif_graphql.return_value = {"notifications": {"deleteArchivedNotifications": True}} + tool_fn = make_tool_fn( + "unraid_mcp.tools.notifications", "register_notifications_tool", "unraid_notifications" + ) + result = await tool_fn(action="delete_archived", confirm=True) + assert result["success"] is True + + async def test_rclone_delete_remote_with_confirm(self, _mock_rclone_graphql: AsyncMock) -> None: + _mock_rclone_graphql.return_value = {"rclone": {"deleteRCloneRemote": True}} + tool_fn = make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone") + result = await tool_fn(action="delete_remote", name="my-remote", confirm=True) + assert result["success"] is True + + async def test_keys_delete_with_confirm(self, _mock_keys_graphql: AsyncMock) -> None: + _mock_keys_graphql.return_value = {"deleteApiKeys": True} + tool_fn = make_tool_fn("unraid_mcp.tools.keys", "register_keys_tool", "unraid_keys") + result = await tool_fn(action="delete", key_id="key-123", confirm=True) + assert result["success"] is True diff --git a/tests/test_array.py b/tests/test_array.py index bc3cdc3..5f22c6a 100644 --- a/tests/test_array.py +++ b/tests/test_array.py @@ -20,26 +20,33 @@ def _make_tool(): class TestArrayValidation: - async def test_destructive_action_requires_confirm(self, _mock_graphql: AsyncMock) -> None: + async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None: tool_fn = _make_tool() - for action in ("start", "stop", "shutdown", "reboot"): - with pytest.raises(ToolError, match="destructive"): - await tool_fn(action=action) + with pytest.raises(ToolError, match="Invalid action"): + await tool_fn(action="start") - async def test_disk_action_requires_disk_id(self, _mock_graphql: AsyncMock) -> None: + async 
def test_removed_actions_are_invalid(self, _mock_graphql: AsyncMock) -> None: tool_fn = _make_tool() - for action in ("mount_disk", "unmount_disk", "clear_stats"): - with pytest.raises(ToolError, match="disk_id"): + for action in ( + "start", + "stop", + "shutdown", + "reboot", + "mount_disk", + "unmount_disk", + "clear_stats", + ): + with pytest.raises(ToolError, match="Invalid action"): await tool_fn(action=action) class TestArrayActions: - async def test_start_array(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"setState": {"state": "STARTED"}} + async def test_parity_start(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"parityCheck": {"start": True}} tool_fn = _make_tool() - result = await tool_fn(action="start", confirm=True) + result = await tool_fn(action="parity_start") assert result["success"] is True - assert result["action"] == "start" + assert result["action"] == "parity_start" _mock_graphql.assert_called_once() async def test_parity_start_with_correct(self, _mock_graphql: AsyncMock) -> None: @@ -56,45 +63,22 @@ class TestArrayActions: result = await tool_fn(action="parity_status") assert result["success"] is True - async def test_mount_disk(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"mountArrayDisk": True} - tool_fn = _make_tool() - result = await tool_fn(action="mount_disk", disk_id="disk:1") - assert result["success"] is True - call_args = _mock_graphql.call_args - assert call_args[0][1] == {"id": "disk:1"} - - async def test_shutdown(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"shutdown": True} - tool_fn = _make_tool() - result = await tool_fn(action="shutdown", confirm=True) - assert result["success"] is True - assert result["action"] == "shutdown" - - async def test_stop_array(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"setState": {"state": "STOPPED"}} - tool_fn = _make_tool() - result = await 
tool_fn(action="stop", confirm=True) - assert result["success"] is True - assert result["action"] == "stop" - - async def test_reboot(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"reboot": True} - tool_fn = _make_tool() - result = await tool_fn(action="reboot", confirm=True) - assert result["success"] is True - assert result["action"] == "reboot" - async def test_parity_pause(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"parityCheck": {"pause": True}} tool_fn = _make_tool() result = await tool_fn(action="parity_pause") assert result["success"] is True - async def test_unmount_disk(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"unmountArrayDisk": True} + async def test_parity_resume(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"parityCheck": {"resume": True}} tool_fn = _make_tool() - result = await tool_fn(action="unmount_disk", disk_id="disk:1") + result = await tool_fn(action="parity_resume") + assert result["success"] is True + + async def test_parity_cancel(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"parityCheck": {"cancel": True}} + tool_fn = _make_tool() + result = await tool_fn(action="parity_cancel") assert result["success"] is True async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None: @@ -107,63 +91,46 @@ class TestArrayActions: class TestArrayMutationFailures: """Tests for mutation responses that indicate failure.""" - async def test_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None: - """Mutation returning False in the response field should still succeed (the tool - wraps the raw response; it doesn't inspect the inner boolean).""" - _mock_graphql.return_value = {"setState": False} + async def test_parity_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"parityCheck": {"start": False}} tool_fn = _make_tool() - result = 
await tool_fn(action="start", confirm=True) + result = await tool_fn(action="parity_start") assert result["success"] is True - assert result["data"] == {"setState": False} + assert result["data"] == {"parityCheck": {"start": False}} - async def test_start_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None: - """Mutation returning null for the response field.""" - _mock_graphql.return_value = {"setState": None} + async def test_parity_start_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"parityCheck": {"start": None}} tool_fn = _make_tool() - result = await tool_fn(action="start", confirm=True) + result = await tool_fn(action="parity_start") assert result["success"] is True - assert result["data"] == {"setState": None} + assert result["data"] == {"parityCheck": {"start": None}} - async def test_start_mutation_returns_empty_object(self, _mock_graphql: AsyncMock) -> None: - """Mutation returning an empty object for the response field.""" - _mock_graphql.return_value = {"setState": {}} + async def test_parity_start_mutation_returns_empty_object( + self, _mock_graphql: AsyncMock + ) -> None: + _mock_graphql.return_value = {"parityCheck": {"start": {}}} tool_fn = _make_tool() - result = await tool_fn(action="start", confirm=True) + result = await tool_fn(action="parity_start") assert result["success"] is True - assert result["data"] == {"setState": {}} - - async def test_mount_disk_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None: - """mountArrayDisk returning False indicates mount failed.""" - _mock_graphql.return_value = {"mountArrayDisk": False} - tool_fn = _make_tool() - result = await tool_fn(action="mount_disk", disk_id="disk:1") - assert result["success"] is True - assert result["data"]["mountArrayDisk"] is False + assert result["data"] == {"parityCheck": {"start": {}}} async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None: - """Mid-operation timeout should be wrapped in 
ToolError.""" - _mock_graphql.side_effect = TimeoutError("operation timed out") tool_fn = _make_tool() with pytest.raises(ToolError, match="timed out"): - await tool_fn(action="shutdown", confirm=True) + await tool_fn(action="parity_cancel") class TestArrayNetworkErrors: """Tests for network-level failures in array operations.""" async def test_http_500_server_error(self, _mock_graphql: AsyncMock) -> None: - """HTTP 500 from the API should be wrapped in ToolError.""" - mock_response = AsyncMock() - mock_response.status_code = 500 - mock_response.text = "Internal Server Error" _mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error") tool_fn = _make_tool() with pytest.raises(ToolError, match="HTTP error 500"): - await tool_fn(action="start", confirm=True) + await tool_fn(action="parity_start") async def test_connection_refused(self, _mock_graphql: AsyncMock) -> None: - """Connection refused should be wrapped in ToolError.""" _mock_graphql.side_effect = ToolError("Network connection error: Connection refused") tool_fn = _make_tool() with pytest.raises(ToolError, match="Network connection error"): diff --git a/tests/test_client.py b/tests/test_client.py index 39f9c12..b144b75 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -368,9 +368,7 @@ class TestGraphQLErrorHandling: async def test_graphql_error_raises_tool_error(self) -> None: mock_response = MagicMock() mock_response.raise_for_status = MagicMock() - mock_response.json.return_value = { - "errors": [{"message": "Field 'bogus' not found"}] - } + mock_response.json.return_value = {"errors": [{"message": "Field 'bogus' not found"}]} mock_client = AsyncMock() mock_client.post.return_value = mock_response @@ -403,9 +401,7 @@ class TestGraphQLErrorHandling: async def test_idempotent_start_returns_success(self) -> None: mock_response = MagicMock() mock_response.raise_for_status = MagicMock() - mock_response.json.return_value = { - "errors": [{"message": "Container already running"}] - 
} + mock_response.json.return_value = {"errors": [{"message": "Container already running"}]} mock_client = AsyncMock() mock_client.post.return_value = mock_response @@ -421,9 +417,7 @@ class TestGraphQLErrorHandling: async def test_idempotent_stop_returns_success(self) -> None: mock_response = MagicMock() mock_response.raise_for_status = MagicMock() - mock_response.json.return_value = { - "errors": [{"message": "Container not running"}] - } + mock_response.json.return_value = {"errors": [{"message": "Container not running"}]} mock_client = AsyncMock() mock_client.post.return_value = mock_response @@ -440,9 +434,7 @@ class TestGraphQLErrorHandling: """An error that doesn't match idempotent patterns still raises even with context.""" mock_response = MagicMock() mock_response.raise_for_status = MagicMock() - mock_response.json.return_value = { - "errors": [{"message": "Permission denied"}] - } + mock_response.json.return_value = {"errors": [{"message": "Permission denied"}]} mock_client = AsyncMock() mock_client.post.return_value = mock_response diff --git a/tests/test_docker.py b/tests/test_docker.py index 6281e72..35c4244 100644 --- a/tests/test_docker.py +++ b/tests/test_docker.py @@ -93,8 +93,21 @@ class TestDockerActions: async def test_start_container(self, _mock_graphql: AsyncMock) -> None: # First call resolves ID, second performs start _mock_graphql.side_effect = [ - {"docker": {"containers": [{"id": "abc123def456" * 4 + "abcd1234abcd1234:local", "names": ["plex"]}]}}, - {"docker": {"start": {"id": "abc123def456" * 4 + "abcd1234abcd1234:local", "state": "running"}}}, + { + "docker": { + "containers": [ + {"id": "abc123def456" * 4 + "abcd1234abcd1234:local", "names": ["plex"]} + ] + } + }, + { + "docker": { + "start": { + "id": "abc123def456" * 4 + "abcd1234abcd1234:local", + "state": "running", + } + } + }, ] tool_fn = _make_tool() result = await tool_fn(action="start", container_id="plex") @@ -114,7 +127,9 @@ class TestDockerActions: async def 
test_check_updates(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "docker": {"containerUpdateStatuses": [{"id": "c1", "name": "plex", "updateAvailable": True}]} + "docker": { + "containerUpdateStatuses": [{"id": "c1", "name": "plex", "updateAvailable": True}] + } } tool_fn = _make_tool() result = await tool_fn(action="check_updates") @@ -175,7 +190,11 @@ class TestDockerActions: async def test_details_found(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "docker": {"containers": [{"id": "c1", "names": ["plex"], "state": "running", "image": "plexinc/pms"}]} + "docker": { + "containers": [ + {"id": "c1", "names": ["plex"], "state": "running", "image": "plexinc/pms"} + ] + } } tool_fn = _make_tool() result = await tool_fn(action="details", container_id="plex") diff --git a/tests/test_health.py b/tests/test_health.py index 1cd800c..b0e978a 100644 --- a/tests/test_health.py +++ b/tests/test_health.py @@ -44,12 +44,8 @@ class TestHealthActions: "os": {"uptime": 86400}, }, "array": {"state": "STARTED"}, - "notifications": { - "overview": {"unread": {"alert": 0, "warning": 0, "total": 3}} - }, - "docker": { - "containers": [{"id": "c1", "state": "running", "status": "Up 2 days"}] - }, + "notifications": {"overview": {"unread": {"alert": 0, "warning": 0, "total": 3}}}, + "docker": {"containers": [{"id": "c1", "state": "running", "status": "Up 2 days"}]}, } tool_fn = _make_tool() result = await tool_fn(action="check") @@ -60,9 +56,7 @@ class TestHealthActions: _mock_graphql.return_value = { "info": {"machineId": "abc", "versions": {"unraid": "7.2"}, "os": {"uptime": 100}}, "array": {"state": "STARTED"}, - "notifications": { - "overview": {"unread": {"alert": 3, "warning": 0, "total": 3}} - }, + "notifications": {"overview": {"unread": {"alert": 3, "warning": 0, "total": 3}}}, "docker": {"containers": []}, } tool_fn = _make_tool() @@ -88,9 +82,7 @@ class TestHealthActions: _mock_graphql.return_value = { "info": {}, "array": 
{"state": "STARTED"}, - "notifications": { - "overview": {"unread": {"alert": 5, "warning": 0, "total": 5}} - }, + "notifications": {"overview": {"unread": {"alert": 5, "warning": 0, "total": 5}}}, "docker": {"containers": []}, } tool_fn = _make_tool() @@ -102,10 +94,13 @@ class TestHealthActions: async def test_diagnose_wraps_exception(self, _mock_graphql: AsyncMock) -> None: """When _diagnose_subscriptions raises, tool wraps in ToolError.""" tool_fn = _make_tool() - with patch( - "unraid_mcp.tools.health._diagnose_subscriptions", - side_effect=RuntimeError("broken"), - ), pytest.raises(ToolError, match="broken"): + with ( + patch( + "unraid_mcp.tools.health._diagnose_subscriptions", + side_effect=RuntimeError("broken"), + ), + pytest.raises(ToolError, match="broken"), + ): await tool_fn(action="diagnose") async def test_diagnose_success(self, _mock_graphql: AsyncMock) -> None: @@ -131,11 +126,14 @@ class TestHealthActions: try: # Replace the modules with objects that raise ImportError on access - with patch.dict(sys.modules, { - "unraid_mcp.subscriptions": None, - "unraid_mcp.subscriptions.manager": None, - "unraid_mcp.subscriptions.resources": None, - }): + with patch.dict( + sys.modules, + { + "unraid_mcp.subscriptions": None, + "unraid_mcp.subscriptions.manager": None, + "unraid_mcp.subscriptions.resources": None, + }, + ): result = await _diagnose_subscriptions() assert "error" in result finally: diff --git a/tests/test_info.py b/tests/test_info.py index bbe9f6d..02fc3ea 100644 --- a/tests/test_info.py +++ b/tests/test_info.py @@ -20,7 +20,14 @@ from unraid_mcp.tools.info import ( class TestProcessSystemInfo: def test_processes_os_info(self) -> None: raw = { - "os": {"distro": "Unraid", "release": "7.2", "platform": "linux", "arch": "x86_64", "hostname": "tower", "uptime": 3600}, + "os": { + "distro": "Unraid", + "release": "7.2", + "platform": "linux", + "arch": "x86_64", + "hostname": "tower", + "uptime": 3600, + }, "cpu": {"manufacturer": "AMD", "brand": 
"Ryzen", "cores": 8, "threads": 16}, } result = _process_system_info(raw) @@ -34,7 +41,19 @@ class TestProcessSystemInfo: assert result["summary"] == {"memory_summary": "Memory information not available."} def test_processes_memory_layout(self) -> None: - raw = {"memory": {"layout": [{"bank": "0", "type": "DDR4", "clockSpeed": 3200, "manufacturer": "G.Skill", "partNum": "XYZ"}]}} + raw = { + "memory": { + "layout": [ + { + "bank": "0", + "type": "DDR4", + "clockSpeed": 3200, + "manufacturer": "G.Skill", + "partNum": "XYZ", + } + ] + } + } result = _process_system_info(raw) assert len(result["summary"]["memory_layout_details"]) == 1 @@ -130,7 +149,13 @@ class TestUnraidInfoTool: async def test_overview_action(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { "info": { - "os": {"distro": "Unraid", "release": "7.2", "platform": "linux", "arch": "x86_64", "hostname": "test"}, + "os": { + "distro": "Unraid", + "release": "7.2", + "platform": "linux", + "arch": "x86_64", + "hostname": "test", + }, "cpu": {"manufacturer": "Intel", "brand": "i7", "cores": 4, "threads": 8}, } } @@ -165,7 +190,9 @@ class TestUnraidInfoTool: await tool_fn(action="online") async def test_metrics(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"metrics": {"cpu": {"used": 25.5}, "memory": {"used": 8192, "total": 32768}}} + _mock_graphql.return_value = { + "metrics": {"cpu": {"used": 25.5}, "memory": {"used": 8192, "total": 32768}} + } tool_fn = _make_tool() result = await tool_fn(action="metrics") assert result["cpu"]["used"] == 25.5 @@ -178,7 +205,9 @@ class TestUnraidInfoTool: assert result["services"][0]["name"] == "docker" async def test_settings(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"settings": {"unified": {"values": {"timezone": "US/Eastern"}}}} + _mock_graphql.return_value = { + "settings": {"unified": {"values": {"timezone": "US/Eastern"}}} + } tool_fn = _make_tool() result = await 
tool_fn(action="settings") assert result["timezone"] == "US/Eastern" @@ -191,20 +220,32 @@ class TestUnraidInfoTool: assert result == {"raw": "raw_string"} async def test_servers(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"servers": [{"id": "s:1", "name": "tower", "status": "online"}]} + _mock_graphql.return_value = { + "servers": [{"id": "s:1", "name": "tower", "status": "online"}] + } tool_fn = _make_tool() result = await tool_fn(action="servers") assert len(result["servers"]) == 1 assert result["servers"][0]["name"] == "tower" async def test_flash(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"flash": {"id": "f:1", "guid": "abc", "product": "SanDisk", "vendor": "SanDisk", "size": 32000000000}} + _mock_graphql.return_value = { + "flash": { + "id": "f:1", + "guid": "abc", + "product": "SanDisk", + "vendor": "SanDisk", + "size": 32000000000, + } + } tool_fn = _make_tool() result = await tool_fn(action="flash") assert result["product"] == "SanDisk" async def test_ups_devices(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"upsDevices": [{"id": "ups:1", "model": "APC", "status": "online", "charge": 100}]} + _mock_graphql.return_value = { + "upsDevices": [{"id": "ups:1", "model": "APC", "status": "online", "charge": 100}] + } tool_fn = _make_tool() result = await tool_fn(action="ups_devices") assert len(result["ups_devices"]) == 1 diff --git a/tests/test_keys.py b/tests/test_keys.py index 84c5ab1..3d7ab5e 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -56,7 +56,9 @@ class TestKeysActions: assert len(result["keys"]) == 1 async def test_get(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"apiKey": {"id": "k:1", "name": "mcp-key", "roles": ["admin"]}} + _mock_graphql.return_value = { + "apiKey": {"id": "k:1", "name": "mcp-key", "roles": ["admin"]} + } tool_fn = _make_tool() result = await tool_fn(action="get", key_id="k:1") assert result["name"] == 
"mcp-key" @@ -72,7 +74,12 @@ class TestKeysActions: async def test_create_with_roles(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "createApiKey": {"id": "k:new", "name": "admin-key", "key": "secret", "roles": ["admin"]} + "createApiKey": { + "id": "k:new", + "name": "admin-key", + "key": "secret", + "roles": ["admin"], + } } tool_fn = _make_tool() result = await tool_fn(action="create", name="admin-key", roles=["admin"]) diff --git a/tests/test_notifications.py b/tests/test_notifications.py index e9d32b9..0ad9dc3 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -11,7 +11,9 @@ from unraid_mcp.core.exceptions import ToolError @pytest.fixture def _mock_graphql() -> Generator[AsyncMock, None, None]: - with patch("unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock) as mock: + with patch( + "unraid_mcp.tools.notifications.make_graphql_request", new_callable=AsyncMock + ) as mock: yield mock @@ -64,9 +66,7 @@ class TestNotificationsActions: async def test_list(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "notifications": { - "list": [{"id": "n:1", "title": "Test", "importance": "INFO"}] - } + "notifications": {"list": [{"id": "n:1", "title": "Test", "importance": "INFO"}]} } tool_fn = _make_tool() result = await tool_fn(action="list") @@ -82,7 +82,9 @@ class TestNotificationsActions: async def test_create(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "notifications": {"createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"}} + "notifications": { + "createNotification": {"id": "n:new", "title": "Test", "importance": "INFO"} + } } tool_fn = _make_tool() result = await tool_fn( @@ -126,9 +128,7 @@ class TestNotificationsActions: async def test_list_with_importance_filter(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "notifications": { - "list": [{"id": "n:1", "title": "Alert", 
"importance": "WARNING"}] - } + "notifications": {"list": [{"id": "n:1", "title": "Alert", "importance": "WARNING"}]} } tool_fn = _make_tool() result = await tool_fn(action="list", importance="warning", limit=10, offset=5) diff --git a/tests/test_rclone.py b/tests/test_rclone.py index 48cf20b..45a0477 100644 --- a/tests/test_rclone.py +++ b/tests/test_rclone.py @@ -39,9 +39,7 @@ class TestRcloneValidation: class TestRcloneActions: async def test_list_remotes(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = { - "rclone": {"remotes": [{"name": "gdrive", "type": "drive"}]} - } + _mock_graphql.return_value = {"rclone": {"remotes": [{"name": "gdrive", "type": "drive"}]}} tool_fn = _make_tool() result = await tool_fn(action="list_remotes") assert len(result["remotes"]) == 1 diff --git a/tests/test_storage.py b/tests/test_storage.py index cbe554f..6c9812f 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -95,7 +95,14 @@ class TestStorageActions: async def test_disk_details(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "disk": {"id": "d:1", "device": "sda", "name": "WD", "serialNum": "SN1", "size": 1073741824, "temperature": 35} + "disk": { + "id": "d:1", + "device": "sda", + "name": "WD", + "serialNum": "SN1", + "size": 1073741824, + "temperature": 35, + } } tool_fn = _make_tool() result = await tool_fn(action="disk_details", disk_id="d:1") @@ -121,7 +128,9 @@ class TestStorageActions: assert len(result["log_files"]) == 1 async def test_logs(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "log line", "totalLines": 1}} + _mock_graphql.return_value = { + "logFile": {"path": "/var/log/syslog", "content": "log line", "totalLines": 1} + } tool_fn = _make_tool() result = await tool_fn(action="logs", log_path="/var/log/syslog") assert result["content"] == "log line" diff --git a/tests/test_users.py b/tests/test_users.py index 
bf0d4a3..534993f 100644 --- a/tests/test_users.py +++ b/tests/test_users.py @@ -1,4 +1,8 @@ -"""Tests for unraid_users tool.""" +"""Tests for unraid_users tool. + +NOTE: Unraid GraphQL API only supports the me() query. +User management operations (list, add, delete, cloud, remote_access, origins) are NOT available in the API. +""" from collections.abc import Generator from unittest.mock import AsyncMock, patch @@ -20,112 +24,54 @@ def _make_tool(): class TestUsersValidation: - async def test_delete_requires_confirm(self, _mock_graphql: AsyncMock) -> None: - tool_fn = _make_tool() - with pytest.raises(ToolError, match="destructive"): - await tool_fn(action="delete", user_id="u:1") + """Test validation for invalid actions.""" - async def test_get_requires_user_id(self, _mock_graphql: AsyncMock) -> None: + async def test_invalid_action_rejected(self, _mock_graphql: AsyncMock) -> None: + """Test that non-existent actions are rejected with clear error.""" tool_fn = _make_tool() - with pytest.raises(ToolError, match="user_id"): - await tool_fn(action="get") + with pytest.raises(ToolError, match="Invalid action"): + await tool_fn(action="list") - async def test_add_requires_name_and_password(self, _mock_graphql: AsyncMock) -> None: - tool_fn = _make_tool() - with pytest.raises(ToolError, match="requires name and password"): + with pytest.raises(ToolError, match="Invalid action"): await tool_fn(action="add") - async def test_delete_requires_user_id(self, _mock_graphql: AsyncMock) -> None: - tool_fn = _make_tool() - with pytest.raises(ToolError, match="user_id"): - await tool_fn(action="delete", confirm=True) + with pytest.raises(ToolError, match="Invalid action"): + await tool_fn(action="delete") + + with pytest.raises(ToolError, match="Invalid action"): + await tool_fn(action="cloud") class TestUsersActions: + """Test the single supported action: me.""" + async def test_me(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"me": {"id": "u:1", "name": 
"root", "description": "", "roles": ["ADMIN"]}} + """Test querying current authenticated user.""" + _mock_graphql.return_value = { + "me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]} + } tool_fn = _make_tool() result = await tool_fn(action="me") assert result["name"] == "root" + assert result["roles"] == ["ADMIN"] + _mock_graphql.assert_called_once() - async def test_list(self, _mock_graphql: AsyncMock) -> None: + async def test_me_default_action(self, _mock_graphql: AsyncMock) -> None: + """Test that 'me' is the default action.""" _mock_graphql.return_value = { - "users": [{"id": "u:1", "name": "root"}, {"id": "u:2", "name": "guest"}] + "me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]} } tool_fn = _make_tool() - result = await tool_fn(action="list") - assert len(result["users"]) == 2 - - async def test_get(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"user": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}} - tool_fn = _make_tool() - result = await tool_fn(action="get", user_id="u:1") + result = await tool_fn() assert result["name"] == "root" - async def test_add(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "newuser", "description": "", "roles": ["USER"]}} - tool_fn = _make_tool() - result = await tool_fn(action="add", name="newuser", password="pass123") - assert result["success"] is True - - async def test_add_with_role(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "admin2", "description": "", "roles": ["ADMIN"]}} - tool_fn = _make_tool() - result = await tool_fn(action="add", name="admin2", password="pass123", role="admin") - assert result["success"] is True - call_args = _mock_graphql.call_args - assert call_args[0][1]["input"]["role"] == "ADMIN" - - async def test_delete(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = 
{"deleteUser": {"id": "u:2", "name": "guest"}} - tool_fn = _make_tool() - result = await tool_fn(action="delete", user_id="u:2", confirm=True) - assert result["success"] is True - call_args = _mock_graphql.call_args - assert call_args[0][1]["input"]["id"] == "u:2" - - async def test_cloud(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"cloud": {"status": "connected", "apiKey": "***"}} - tool_fn = _make_tool() - result = await tool_fn(action="cloud") - assert result["status"] == "connected" - - async def test_remote_access(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"remoteAccess": {"enabled": True, "url": "https://example.com"}} - tool_fn = _make_tool() - result = await tool_fn(action="remote_access") - assert result["enabled"] is True - - async def test_origins(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"allowedOrigins": ["http://localhost", "https://example.com"]} - tool_fn = _make_tool() - result = await tool_fn(action="origins") - assert len(result["origins"]) == 2 - class TestUsersNoneHandling: """Verify actions return empty dict (not TypeError) when API returns None.""" async def test_me_returns_none(self, _mock_graphql: AsyncMock) -> None: + """Test that me returns empty dict when API returns None.""" _mock_graphql.return_value = {"me": None} tool_fn = _make_tool() result = await tool_fn(action="me") assert result == {} - - async def test_get_returns_none(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"user": None} - tool_fn = _make_tool() - result = await tool_fn(action="get", user_id="u:1") - assert result == {} - - async def test_cloud_returns_none(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"cloud": None} - tool_fn = _make_tool() - result = await tool_fn(action="cloud") - assert result == {} - - async def test_remote_access_returns_none(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = 
{"remoteAccess": None} - tool_fn = _make_tool() - result = await tool_fn(action="remote_access") - assert result == {} diff --git a/tests/test_vm.py b/tests/test_vm.py index 301918a..a18b2f3 100644 --- a/tests/test_vm.py +++ b/tests/test_vm.py @@ -11,7 +11,9 @@ from unraid_mcp.core.exceptions import ToolError @pytest.fixture def _mock_graphql() -> Generator[AsyncMock, None, None]: - with patch("unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock) as mock: + with patch( + "unraid_mcp.tools.virtualization.make_graphql_request", new_callable=AsyncMock + ) as mock: yield mock @@ -67,7 +69,9 @@ class TestVmActions: async def test_details_by_uuid(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "vms": {"domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}]} + "vms": { + "domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}] + } } tool_fn = _make_tool() result = await tool_fn(action="details", vm_id="uuid-1") @@ -75,7 +79,9 @@ class TestVmActions: async def test_details_by_name(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "vms": {"domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}]} + "vms": { + "domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}] + } } tool_fn = _make_tool() result = await tool_fn(action="details", vm_id="Win11") @@ -83,7 +89,9 @@ class TestVmActions: async def test_details_not_found(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = { - "vms": {"domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}]} + "vms": { + "domains": [{"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}] + } } tool_fn = _make_tool() with pytest.raises(ToolError, match="not found"): diff --git a/unraid_mcp/config/logging.py b/unraid_mcp/config/logging.py index 7b7b25a..c6ed490 100644 --- a/unraid_mcp/config/logging.py +++ 
b/unraid_mcp/config/logging.py @@ -19,6 +19,7 @@ from rich.text import Text try: from fastmcp.utilities.logging import get_logger as get_fastmcp_logger + FASTMCP_AVAILABLE = True except ImportError: FASTMCP_AVAILABLE = False @@ -33,7 +34,7 @@ console = Console(stderr=True, force_terminal=True) class OverwriteFileHandler(logging.FileHandler): """Custom file handler that overwrites the log file when it reaches max size.""" - def __init__(self, filename, max_bytes=10*1024*1024, mode="a", encoding=None, delay=False): + def __init__(self, filename, max_bytes=10 * 1024 * 1024, mode="a", encoding=None, delay=False): """Initialize the handler. Args: @@ -74,14 +75,17 @@ class OverwriteFileHandler(logging.FileHandler): lineno=0, msg="=== LOG FILE RESET (10MB limit reached) ===", args=(), - exc_info=None + exc_info=None, ) super().emit(reset_record) except OSError as e: import sys - print(f"WARNING: Log file size check failed: {e}. Continuing without rotation.", - file=sys.stderr) + + print( + f"WARNING: Log file size check failed: {e}. 
Continuing without rotation.", + file=sys.stderr, + ) # Emit the original record super().emit(record) @@ -114,17 +118,13 @@ def setup_logger(name: str = "UnraidMCPServer") -> logging.Logger: show_level=True, show_path=False, rich_tracebacks=True, - tracebacks_show_locals=True + tracebacks_show_locals=True, ) console_handler.setLevel(numeric_log_level) logger.addHandler(console_handler) # File Handler with 10MB cap (overwrites instead of rotating) - file_handler = OverwriteFileHandler( - LOG_FILE_PATH, - max_bytes=10*1024*1024, - encoding="utf-8" - ) + file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8") file_handler.setLevel(numeric_log_level) file_formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s" @@ -158,17 +158,13 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None: show_path=False, rich_tracebacks=True, tracebacks_show_locals=True, - markup=True + markup=True, ) console_handler.setLevel(numeric_log_level) fastmcp_logger.addHandler(console_handler) # File Handler with 10MB cap (overwrites instead of rotating) - file_handler = OverwriteFileHandler( - LOG_FILE_PATH, - max_bytes=10*1024*1024, - encoding="utf-8" - ) + file_handler = OverwriteFileHandler(LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8") file_handler.setLevel(numeric_log_level) file_formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(funcName)s - %(lineno)d - %(message)s" @@ -191,16 +187,14 @@ def configure_fastmcp_logger_with_rich() -> logging.Logger | None: show_path=False, rich_tracebacks=True, tracebacks_show_locals=True, - markup=True + markup=True, ) root_console_handler.setLevel(numeric_log_level) root_logger.addHandler(root_console_handler) # File Handler for root logger with 10MB cap (overwrites instead of rotating) root_file_handler = OverwriteFileHandler( - LOG_FILE_PATH, - max_bytes=10*1024*1024, - 
encoding="utf-8" + LOG_FILE_PATH, max_bytes=10 * 1024 * 1024, encoding="utf-8" ) root_file_handler.setLevel(numeric_log_level) root_file_handler.setFormatter(file_formatter) @@ -255,16 +249,18 @@ def get_est_timestamp() -> str: now = datetime.now(est) return now.strftime("%y/%m/%d %H:%M:%S") + def log_header(title: str) -> None: """Print a beautiful header panel with Nordic blue styling.""" panel = Panel( Align.center(Text(title, style="bold white")), style="#5E81AC", # Nordic blue padding=(0, 2), - border_style="#81A1C1" # Light Nordic blue + border_style="#81A1C1", # Light Nordic blue ) console.print(panel) + def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0) -> None: """Log a message with specific level and indentation.""" timestamp = get_est_timestamp() @@ -272,15 +268,17 @@ def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0 # Enhanced Nordic color scheme with more blues level_config = { - "error": {"color": "#BF616A", "icon": "❌", "style": "bold"}, # Nordic red - "warning": {"color": "#EBCB8B", "icon": "⚠️", "style": ""}, # Nordic yellow - "success": {"color": "#A3BE8C", "icon": "✅", "style": "bold"}, # Nordic green + "error": {"color": "#BF616A", "icon": "❌", "style": "bold"}, # Nordic red + "warning": {"color": "#EBCB8B", "icon": "⚠️", "style": ""}, # Nordic yellow + "success": {"color": "#A3BE8C", "icon": "✅", "style": "bold"}, # Nordic green "info": {"color": "#5E81AC", "icon": "\u2139\ufe0f", "style": "bold"}, # Nordic blue (bold) - "status": {"color": "#81A1C1", "icon": "🔍", "style": ""}, # Light Nordic blue - "debug": {"color": "#4C566A", "icon": "🐛", "style": ""}, # Nordic dark gray + "status": {"color": "#81A1C1", "icon": "🔍", "style": ""}, # Light Nordic blue + "debug": {"color": "#4C566A", "icon": "🐛", "style": ""}, # Nordic dark gray } - config = level_config.get(level, {"color": "#81A1C1", "icon": "•", "style": ""}) # Default to light Nordic blue + config = level_config.get( + level, 
{"color": "#81A1C1", "icon": "•", "style": ""} + ) # Default to light Nordic blue # Create beautifully formatted text text = Text() @@ -308,26 +306,33 @@ def log_with_level_and_indent(message: str, level: str = "info", indent: int = 0 console.print(text) + def log_separator() -> None: """Print a beautiful separator line with Nordic blue styling.""" console.print(Rule(style="#81A1C1")) + # Convenience functions for different log levels def log_error(message: str, indent: int = 0) -> None: log_with_level_and_indent(message, "error", indent) + def log_warning(message: str, indent: int = 0) -> None: log_with_level_and_indent(message, "warning", indent) + def log_success(message: str, indent: int = 0) -> None: log_with_level_and_indent(message, "success", indent) + def log_info(message: str, indent: int = 0) -> None: log_with_level_and_indent(message, "info", indent) + def log_status(message: str, indent: int = 0) -> None: log_with_level_and_indent(message, "status", indent) + # Global logger instance - modules can import this directly if FASTMCP_AVAILABLE: # Use FastMCP logger with Rich formatting diff --git a/unraid_mcp/config/settings.py b/unraid_mcp/config/settings.py index eae08dd..e2cd869 100644 --- a/unraid_mcp/config/settings.py +++ b/unraid_mcp/config/settings.py @@ -22,7 +22,7 @@ dotenv_paths = [ Path("/app/.env.local"), # Container mount point PROJECT_ROOT / ".env.local", # Project root .env.local PROJECT_ROOT / ".env", # Project root .env - UNRAID_MCP_DIR / ".env" # Local .env in unraid_mcp/ + UNRAID_MCP_DIR / ".env", # Local .env in unraid_mcp/ ] for dotenv_path in dotenv_paths: @@ -73,10 +73,7 @@ def validate_required_config() -> tuple[bool, list[str]]: Returns: bool: True if all required config is present, False otherwise. 
""" - required_vars = [ - ("UNRAID_API_URL", UNRAID_API_URL), - ("UNRAID_API_KEY", UNRAID_API_KEY) - ] + required_vars = [("UNRAID_API_URL", UNRAID_API_URL), ("UNRAID_API_KEY", UNRAID_API_KEY)] missing = [] for name, value in required_vars: @@ -105,5 +102,5 @@ def get_config_summary() -> dict[str, Any]: "log_level": LOG_LEVEL_STR, "log_file": str(LOG_FILE_PATH), "config_valid": is_valid, - "missing_config": missing if not is_valid else None + "missing_config": missing if not is_valid else None, } diff --git a/unraid_mcp/core/client.py b/unraid_mcp/core/client.py index 9051bc6..805378b 100644 --- a/unraid_mcp/core/client.py +++ b/unraid_mcp/core/client.py @@ -34,7 +34,9 @@ def _is_sensitive_key(key: str) -> bool: def _redact_sensitive(obj: Any) -> Any: """Recursively redact sensitive values from nested dicts/lists.""" if isinstance(obj, dict): - return {k: ("***" if _is_sensitive_key(k) else _redact_sensitive(v)) for k, v in obj.items()} + return { + k: ("***" if _is_sensitive_key(k) else _redact_sensitive(v)) for k, v in obj.items() + } if isinstance(obj, list): return [_redact_sensitive(item) for item in obj] return obj @@ -62,6 +64,7 @@ def get_timeout_for_operation(profile: str) -> httpx.Timeout: """ return _TIMEOUT_PROFILES.get(profile, DEFAULT_TIMEOUT) + # Global connection pool (module-level singleton) _http_client: httpx.AsyncClient | None = None _client_lock = asyncio.Lock() @@ -82,16 +85,16 @@ def is_idempotent_error(error_message: str, operation: str) -> bool: # Docker container operation patterns if operation == "start": return ( - "already started" in error_lower or - "container already running" in error_lower or - "http code 304" in error_lower + "already started" in error_lower + or "container already running" in error_lower + or "http code 304" in error_lower ) if operation == "stop": return ( - "already stopped" in error_lower or - "container already stopped" in error_lower or - "container not running" in error_lower or - "http code 304" in 
error_lower + "already stopped" in error_lower + or "container already stopped" in error_lower + or "container not running" in error_lower + or "http code 304" in error_lower ) return False @@ -106,19 +109,14 @@ async def _create_http_client() -> httpx.AsyncClient: return httpx.AsyncClient( # Connection pool settings limits=httpx.Limits( - max_keepalive_connections=20, - max_connections=100, - keepalive_expiry=30.0 + max_keepalive_connections=20, max_connections=100, keepalive_expiry=30.0 ), # Default timeout (can be overridden per-request) timeout=DEFAULT_TIMEOUT, # SSL verification verify=UNRAID_VERIFY_SSL, # Connection pooling headers - headers={ - "Connection": "keep-alive", - "User-Agent": f"UnraidMCPServer/{VERSION}" - } + headers={"Connection": "keep-alive", "User-Agent": f"UnraidMCPServer/{VERSION}"}, ) @@ -136,7 +134,9 @@ async def get_http_client() -> httpx.AsyncClient: async with _client_lock: if _http_client is None or _http_client.is_closed: _http_client = await _create_http_client() - logger.info("Created shared HTTP client with connection pooling (20 keepalive, 100 max connections)") + logger.info( + "Created shared HTTP client with connection pooling (20 keepalive, 100 max connections)" + ) client = _http_client @@ -167,7 +167,7 @@ async def make_graphql_request( query: str, variables: dict[str, Any] | None = None, custom_timeout: httpx.Timeout | None = None, - operation_context: dict[str, str] | None = None + operation_context: dict[str, str] | None = None, ) -> dict[str, Any]: """Make GraphQL requests to the Unraid API. 
@@ -193,7 +193,7 @@ async def make_graphql_request( headers = { "Content-Type": "application/json", "X-API-Key": UNRAID_API_KEY, - "User-Agent": f"UnraidMCPServer/{VERSION}" # Custom user-agent + "User-Agent": f"UnraidMCPServer/{VERSION}", # Custom user-agent } payload: dict[str, Any] = {"query": query} @@ -212,10 +212,7 @@ async def make_graphql_request( # Override timeout if custom timeout specified if custom_timeout is not None: response = await client.post( - UNRAID_API_URL, - json=payload, - headers=headers, - timeout=custom_timeout + UNRAID_API_URL, json=payload, headers=headers, timeout=custom_timeout ) else: response = await client.post(UNRAID_API_URL, json=payload, headers=headers) @@ -224,19 +221,23 @@ async def make_graphql_request( response_data = response.json() if response_data.get("errors"): - error_details = "; ".join([err.get("message", str(err)) for err in response_data["errors"]]) + error_details = "; ".join( + [err.get("message", str(err)) for err in response_data["errors"]] + ) # Check if this is an idempotent error that should be treated as success if operation_context and operation_context.get("operation"): operation = operation_context["operation"] if is_idempotent_error(error_details, operation): - logger.warning(f"Idempotent operation '{operation}' - treating as success: {error_details}") + logger.warning( + f"Idempotent operation '{operation}' - treating as success: {error_details}" + ) # Return a success response with the current state information return { "idempotent_success": True, "operation": operation, "message": error_details, - "original_errors": response_data["errors"] + "original_errors": response_data["errors"], } logger.error(f"GraphQL API returned errors: {response_data['errors']}") diff --git a/unraid_mcp/core/exceptions.py b/unraid_mcp/core/exceptions.py index 8b10f4c..2731387 100644 --- a/unraid_mcp/core/exceptions.py +++ b/unraid_mcp/core/exceptions.py @@ -15,26 +15,31 @@ class ToolError(FastMCPToolError): Inherits from 
FastMCP's ToolError to ensure proper MCP protocol handling. """ + pass class ConfigurationError(ToolError): """Raised when there are configuration-related errors.""" + pass class UnraidAPIError(ToolError): """Raised when the Unraid API returns an error or is unreachable.""" + pass class SubscriptionError(ToolError): """Raised when there are WebSocket subscription-related errors.""" + pass class ValidationError(ToolError): """Raised when input validation fails.""" + pass @@ -45,4 +50,5 @@ class IdempotentOperationError(ToolError): which should typically be converted to a success response rather than propagated as an error to the user. """ + pass diff --git a/unraid_mcp/core/types.py b/unraid_mcp/core/types.py index 5170209..b48a4df 100644 --- a/unraid_mcp/core/types.py +++ b/unraid_mcp/core/types.py @@ -12,6 +12,7 @@ from typing import Any @dataclass class SubscriptionData: """Container for subscription data with metadata.""" + data: dict[str, Any] last_updated: datetime subscription_type: str @@ -20,6 +21,7 @@ class SubscriptionData: @dataclass class SystemHealth: """Container for system health status information.""" + is_healthy: bool issues: list[str] warnings: list[str] @@ -30,6 +32,7 @@ class SystemHealth: @dataclass class APIResponse: """Container for standardized API response data.""" + success: bool data: dict[str, Any] | None = None error: str | None = None diff --git a/unraid_mcp/main.py b/unraid_mcp/main.py index c159929..36c772b 100644 --- a/unraid_mcp/main.py +++ b/unraid_mcp/main.py @@ -13,6 +13,7 @@ async def shutdown_cleanup() -> None: """Cleanup resources on server shutdown.""" try: from .core.client import close_http_client + await close_http_client() except Exception as e: print(f"Error during cleanup: {e}") @@ -22,13 +23,17 @@ def main() -> None: """Main entry point for the Unraid MCP Server.""" try: from .server import run_server + run_server() except KeyboardInterrupt: print("\nServer stopped by user") try: asyncio.run(shutdown_cleanup()) 
except RuntimeError as e: - if "event loop is closed" in str(e).lower() or "no running event loop" in str(e).lower(): + if ( + "event loop is closed" in str(e).lower() + or "no running event loop" in str(e).lower() + ): pass # Expected during shutdown else: print(f"WARNING: Unexpected error during cleanup: {e}", file=sys.stderr) @@ -37,7 +42,10 @@ def main() -> None: try: asyncio.run(shutdown_cleanup()) except RuntimeError as e: - if "event loop is closed" in str(e).lower() or "no running event loop" in str(e).lower(): + if ( + "event loop is closed" in str(e).lower() + or "no running event loop" in str(e).lower() + ): pass # Expected during shutdown else: print(f"WARNING: Unexpected error during cleanup: {e}", file=sys.stderr) diff --git a/unraid_mcp/server.py b/unraid_mcp/server.py index 655ca34..18c652f 100644 --- a/unraid_mcp/server.py +++ b/unraid_mcp/server.py @@ -91,28 +91,24 @@ def run_server() -> None: # Register all modules register_all_modules() - logger.info(f"Starting Unraid MCP Server on {UNRAID_MCP_HOST}:{UNRAID_MCP_PORT} using {UNRAID_MCP_TRANSPORT} transport...") + logger.info( + f"Starting Unraid MCP Server on {UNRAID_MCP_HOST}:{UNRAID_MCP_PORT} using {UNRAID_MCP_TRANSPORT} transport..." + ) try: if UNRAID_MCP_TRANSPORT == "streamable-http": mcp.run( - transport="streamable-http", - host=UNRAID_MCP_HOST, - port=UNRAID_MCP_PORT, - path="/mcp" + transport="streamable-http", host=UNRAID_MCP_HOST, port=UNRAID_MCP_PORT, path="/mcp" ) elif UNRAID_MCP_TRANSPORT == "sse": logger.warning("SSE transport is deprecated. Consider switching to 'streamable-http'.") - mcp.run( - transport="sse", - host=UNRAID_MCP_HOST, - port=UNRAID_MCP_PORT, - path="/mcp" - ) + mcp.run(transport="sse", host=UNRAID_MCP_HOST, port=UNRAID_MCP_PORT, path="/mcp") elif UNRAID_MCP_TRANSPORT == "stdio": mcp.run() else: - logger.error(f"Unsupported MCP_TRANSPORT: {UNRAID_MCP_TRANSPORT}. 
Choose 'streamable-http', 'sse', or 'stdio'.") + logger.error( + f"Unsupported MCP_TRANSPORT: {UNRAID_MCP_TRANSPORT}. Choose 'streamable-http', 'sse', or 'stdio'." + ) sys.exit(1) except Exception as e: logger.critical(f"Failed to start Unraid MCP server: {e}", exc_info=True) diff --git a/unraid_mcp/subscriptions/diagnostics.py b/unraid_mcp/subscriptions/diagnostics.py index 7150012..ea77e69 100644 --- a/unraid_mcp/subscriptions/diagnostics.py +++ b/unraid_mcp/subscriptions/diagnostics.py @@ -47,7 +47,10 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: # Build WebSocket URL if not UNRAID_API_URL: raise ToolError("UNRAID_API_URL is not configured") - ws_url = UNRAID_API_URL.replace("https://", "wss://").replace("http://", "ws://") + "/graphql" + ws_url = ( + UNRAID_API_URL.replace("https://", "wss://").replace("http://", "ws://") + + "/graphql" + ) ssl_context = build_ws_ssl_context(ws_url) @@ -57,18 +60,17 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: subprotocols=[Subprotocol("graphql-transport-ws"), Subprotocol("graphql-ws")], ssl=ssl_context, ping_interval=30, - ping_timeout=10 + ping_timeout=10, ) as websocket: - # Send connection init (using standard X-API-Key format) - await websocket.send(json.dumps({ - "type": "connection_init", - "payload": { - "headers": { - "X-API-Key": UNRAID_API_KEY + await websocket.send( + json.dumps( + { + "type": "connection_init", + "payload": {"headers": {"X-API-Key": UNRAID_API_KEY}}, } - } - })) + ) + ) # Wait for ack response = await websocket.recv() @@ -78,11 +80,11 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: return {"error": f"Connection failed: {init_response}"} # Send subscription - await websocket.send(json.dumps({ - "id": "test", - "type": "start", - "payload": {"query": subscription_query} - })) + await websocket.send( + json.dumps( + {"id": "test", "type": "start", "payload": {"query": subscription_query}} + ) + ) # Wait for response with timeout try: @@ -90,26 +92,19 @@ def 
register_diagnostic_tools(mcp: FastMCP) -> None: result = json.loads(response) logger.info(f"[TEST_SUBSCRIPTION] Response: {result}") - return { - "success": True, - "response": result, - "query_tested": subscription_query - } + return {"success": True, "response": result, "query_tested": subscription_query} except TimeoutError: return { "success": True, "response": "No immediate response (subscriptions may only send data on changes)", "query_tested": subscription_query, - "note": "Connection successful, subscription may be waiting for events" + "note": "Connection successful, subscription may be waiting for events", } except Exception as e: logger.error(f"[TEST_SUBSCRIPTION] Error: {e}", exc_info=True) - return { - "error": str(e), - "query_tested": subscription_query - } + return {"error": str(e), "query_tested": subscription_query} @mcp.tool() async def diagnose_subscriptions() -> dict[str, Any]: @@ -140,25 +135,29 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: "max_reconnect_attempts": subscription_manager.max_reconnect_attempts, "unraid_api_url": UNRAID_API_URL[:50] + "..." 
if UNRAID_API_URL else None, "api_key_configured": bool(UNRAID_API_KEY), - "websocket_url": None + "websocket_url": None, }, "subscriptions": status, "summary": { "total_configured": len(subscription_manager.subscription_configs), - "auto_start_count": sum(1 for s in subscription_manager.subscription_configs.values() if s.get("auto_start")), + "auto_start_count": sum( + 1 + for s in subscription_manager.subscription_configs.values() + if s.get("auto_start") + ), "active_count": len(subscription_manager.active_subscriptions), "with_data": len(subscription_manager.resource_data), "in_error_state": 0, - "connection_issues": connection_issues - } + "connection_issues": connection_issues, + }, } # Calculate WebSocket URL if UNRAID_API_URL: if UNRAID_API_URL.startswith("https://"): - ws_url = "wss://" + UNRAID_API_URL[len("https://"):] + ws_url = "wss://" + UNRAID_API_URL[len("https://") :] elif UNRAID_API_URL.startswith("http://"): - ws_url = "ws://" + UNRAID_API_URL[len("http://"):] + ws_url = "ws://" + UNRAID_API_URL[len("http://") :] else: ws_url = UNRAID_API_URL if not ws_url.endswith("/graphql"): @@ -174,42 +173,57 @@ def register_diagnostic_tools(mcp: FastMCP) -> None: diagnostic_info["summary"]["in_error_state"] += 1 if runtime.get("last_error"): - connection_issues.append({ - "subscription": sub_name, - "state": connection_state, - "error": runtime["last_error"] - }) + connection_issues.append( + { + "subscription": sub_name, + "state": connection_state, + "error": runtime["last_error"], + } + ) # Add troubleshooting recommendations recommendations: list[str] = [] if not diagnostic_info["environment"]["api_key_configured"]: - recommendations.append("CRITICAL: No API key configured. Set UNRAID_API_KEY environment variable.") + recommendations.append( + "CRITICAL: No API key configured. Set UNRAID_API_KEY environment variable." + ) if diagnostic_info["summary"]["in_error_state"] > 0: - recommendations.append("Some subscriptions are in error state. 
Check 'connection_issues' for details.") + recommendations.append( + "Some subscriptions are in error state. Check 'connection_issues' for details." + ) if diagnostic_info["summary"]["with_data"] == 0: - recommendations.append("No subscriptions have received data yet. Check WebSocket connectivity and authentication.") + recommendations.append( + "No subscriptions have received data yet. Check WebSocket connectivity and authentication." + ) - if diagnostic_info["summary"]["active_count"] < diagnostic_info["summary"]["auto_start_count"]: - recommendations.append("Not all auto-start subscriptions are active. Check server startup logs.") + if ( + diagnostic_info["summary"]["active_count"] + < diagnostic_info["summary"]["auto_start_count"] + ): + recommendations.append( + "Not all auto-start subscriptions are active. Check server startup logs." + ) diagnostic_info["troubleshooting"] = { "recommendations": recommendations, "log_commands": [ "Check server logs for [WEBSOCKET:*], [AUTH:*], [SUBSCRIPTION:*] prefixed messages", "Look for connection timeout or authentication errors", - "Verify Unraid API URL is accessible and supports GraphQL subscriptions" + "Verify Unraid API URL is accessible and supports GraphQL subscriptions", ], "next_steps": [ "If authentication fails: Verify API key has correct permissions", "If connection fails: Check network connectivity to Unraid server", - "If no data received: Enable DEBUG logging to see detailed protocol messages" - ] + "If no data received: Enable DEBUG logging to see detailed protocol messages", + ], } - logger.info(f"[DIAGNOSTIC] Completed. Active: {diagnostic_info['summary']['active_count']}, With data: {diagnostic_info['summary']['with_data']}, Errors: {diagnostic_info['summary']['in_error_state']}") + logger.info( + f"[DIAGNOSTIC] Completed. 
Active: {diagnostic_info['summary']['active_count']}, With data: {diagnostic_info['summary']['with_data']}, Errors: {diagnostic_info['summary']['in_error_state']}" + ) return diagnostic_info except Exception as e: diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index 161f429..c98be94 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -30,7 +30,9 @@ class SubscriptionManager: self.subscription_lock = asyncio.Lock() # Configuration - self.auto_start_enabled = os.getenv("UNRAID_AUTO_START_SUBSCRIPTIONS", "true").lower() == "true" + self.auto_start_enabled = ( + os.getenv("UNRAID_AUTO_START_SUBSCRIPTIONS", "true").lower() == "true" + ) self.reconnect_attempts: dict[str, int] = {} self.max_reconnect_attempts = int(os.getenv("UNRAID_MAX_RECONNECT_ATTEMPTS", "10")) self.connection_states: dict[str, str] = {} # Track connection state per subscription @@ -50,12 +52,16 @@ class SubscriptionManager: """, "resource": "unraid://logs/stream", "description": "Real-time log file streaming", - "auto_start": False # Started manually with path parameter + "auto_start": False, # Started manually with path parameter } } - logger.info(f"[SUBSCRIPTION_MANAGER] Initialized with auto_start={self.auto_start_enabled}, max_reconnects={self.max_reconnect_attempts}") - logger.debug(f"[SUBSCRIPTION_MANAGER] Available subscriptions: {list(self.subscription_configs.keys())}") + logger.info( + f"[SUBSCRIPTION_MANAGER] Initialized with auto_start={self.auto_start_enabled}, max_reconnects={self.max_reconnect_attempts}" + ) + logger.debug( + f"[SUBSCRIPTION_MANAGER] Available subscriptions: {list(self.subscription_configs.keys())}" + ) async def auto_start_all_subscriptions(self) -> None: """Auto-start all subscriptions marked for auto-start.""" @@ -69,21 +75,31 @@ class SubscriptionManager: for subscription_name, config in self.subscription_configs.items(): if config.get("auto_start", False): try: - 
logger.info(f"[SUBSCRIPTION_MANAGER] Auto-starting subscription: {subscription_name}") + logger.info( + f"[SUBSCRIPTION_MANAGER] Auto-starting subscription: {subscription_name}" + ) await self.start_subscription(subscription_name, str(config["query"])) auto_start_count += 1 except Exception as e: - logger.error(f"[SUBSCRIPTION_MANAGER] Failed to auto-start {subscription_name}: {e}") + logger.error( + f"[SUBSCRIPTION_MANAGER] Failed to auto-start {subscription_name}: {e}" + ) self.last_error[subscription_name] = str(e) - logger.info(f"[SUBSCRIPTION_MANAGER] Auto-start completed. Started {auto_start_count} subscriptions") + logger.info( + f"[SUBSCRIPTION_MANAGER] Auto-start completed. Started {auto_start_count} subscriptions" + ) - async def start_subscription(self, subscription_name: str, query: str, variables: dict[str, Any] | None = None) -> None: + async def start_subscription( + self, subscription_name: str, query: str, variables: dict[str, Any] | None = None + ) -> None: """Start a GraphQL subscription and maintain it as a resource.""" logger.info(f"[SUBSCRIPTION:{subscription_name}] Starting subscription...") if subscription_name in self.active_subscriptions: - logger.warning(f"[SUBSCRIPTION:{subscription_name}] Subscription already active, skipping") + logger.warning( + f"[SUBSCRIPTION:{subscription_name}] Subscription already active, skipping" + ) return # Reset connection tracking @@ -92,12 +108,18 @@ class SubscriptionManager: async with self.subscription_lock: try: - task = asyncio.create_task(self._subscription_loop(subscription_name, query, variables or {})) + task = asyncio.create_task( + self._subscription_loop(subscription_name, query, variables or {}) + ) self.active_subscriptions[subscription_name] = task - logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription task created and started") + logger.info( + f"[SUBSCRIPTION:{subscription_name}] Subscription task created and started" + ) self.connection_states[subscription_name] = "active" except 
Exception as e: - logger.error(f"[SUBSCRIPTION:{subscription_name}] Failed to start subscription task: {e}") + logger.error( + f"[SUBSCRIPTION:{subscription_name}] Failed to start subscription task: {e}" + ) self.connection_states[subscription_name] = "failed" self.last_error[subscription_name] = str(e) raise @@ -120,7 +142,9 @@ class SubscriptionManager: else: logger.warning(f"[SUBSCRIPTION:{subscription_name}] No active subscription to stop") - async def _subscription_loop(self, subscription_name: str, query: str, variables: dict[str, Any] | None) -> None: + async def _subscription_loop( + self, subscription_name: str, query: str, variables: dict[str, Any] | None + ) -> None: """Main loop for maintaining a GraphQL subscription with comprehensive logging.""" retry_delay: int | float = 5 max_retry_delay = 300 # 5 minutes max @@ -129,10 +153,14 @@ class SubscriptionManager: attempt = self.reconnect_attempts.get(subscription_name, 0) + 1 self.reconnect_attempts[subscription_name] = attempt - logger.info(f"[WEBSOCKET:{subscription_name}] Connection attempt #{attempt} (max: {self.max_reconnect_attempts})") + logger.info( + f"[WEBSOCKET:{subscription_name}] Connection attempt #{attempt} (max: {self.max_reconnect_attempts})" + ) if attempt > self.max_reconnect_attempts: - logger.error(f"[WEBSOCKET:{subscription_name}] Max reconnection attempts ({self.max_reconnect_attempts}) exceeded, stopping") + logger.error( + f"[WEBSOCKET:{subscription_name}] Max reconnection attempts ({self.max_reconnect_attempts}) exceeded, stopping" + ) self.connection_states[subscription_name] = "max_retries_exceeded" break @@ -142,9 +170,9 @@ class SubscriptionManager: raise ValueError("UNRAID_API_URL is not configured") if UNRAID_API_URL.startswith("https://"): - ws_url = "wss://" + UNRAID_API_URL[len("https://"):] + ws_url = "wss://" + UNRAID_API_URL[len("https://") :] elif UNRAID_API_URL.startswith("http://"): - ws_url = "ws://" + UNRAID_API_URL[len("http://"):] + ws_url = "ws://" + 
UNRAID_API_URL[len("http://") :] else: ws_url = UNRAID_API_URL @@ -152,13 +180,17 @@ class SubscriptionManager: ws_url = ws_url.rstrip("/") + "/graphql" logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}") - logger.debug(f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}") + logger.debug( + f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}" + ) ssl_context = build_ws_ssl_context(ws_url) # Connection with timeout connect_timeout = 10 - logger.debug(f"[WEBSOCKET:{subscription_name}] Connection timeout: {connect_timeout}s") + logger.debug( + f"[WEBSOCKET:{subscription_name}] Connection timeout: {connect_timeout}s" + ) async with websockets.connect( ws_url, @@ -166,11 +198,12 @@ class SubscriptionManager: ping_interval=20, ping_timeout=10, close_timeout=10, - ssl=ssl_context + ssl=ssl_context, ) as websocket: - selected_proto = websocket.subprotocol or "none" - logger.info(f"[WEBSOCKET:{subscription_name}] Connected! Protocol: {selected_proto}") + logger.info( + f"[WEBSOCKET:{subscription_name}] Connected! Protocol: {selected_proto}" + ) self.connection_states[subscription_name] = "connected" # Reset retry count on successful connection @@ -178,21 +211,21 @@ class SubscriptionManager: retry_delay = 5 # Reset delay # Initialize GraphQL-WS protocol - logger.debug(f"[PROTOCOL:{subscription_name}] Initializing GraphQL-WS protocol...") + logger.debug( + f"[PROTOCOL:{subscription_name}] Initializing GraphQL-WS protocol..." 
+ ) init_type = "connection_init" init_payload: dict[str, Any] = {"type": init_type} if UNRAID_API_KEY: logger.debug(f"[AUTH:{subscription_name}] Adding authentication payload") # Use standard X-API-Key header format (matching HTTP client) - auth_payload = { - "headers": { - "X-API-Key": UNRAID_API_KEY - } - } + auth_payload = {"headers": {"X-API-Key": UNRAID_API_KEY}} init_payload["payload"] = auth_payload else: - logger.warning(f"[AUTH:{subscription_name}] No API key available for authentication") + logger.warning( + f"[AUTH:{subscription_name}] No API key available for authentication" + ) logger.debug(f"[PROTOCOL:{subscription_name}] Sending connection_init message") await websocket.send(json.dumps(init_payload)) @@ -203,45 +236,66 @@ class SubscriptionManager: try: init_data = json.loads(init_raw) - logger.debug(f"[PROTOCOL:{subscription_name}] Received init response: {init_data.get('type')}") + logger.debug( + f"[PROTOCOL:{subscription_name}] Received init response: {init_data.get('type')}" + ) except json.JSONDecodeError as e: - init_preview = init_raw[:200] if isinstance(init_raw, str) else init_raw[:200].decode("utf-8", errors="replace") - logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode init response: {init_preview}...") + init_preview = ( + init_raw[:200] + if isinstance(init_raw, str) + else init_raw[:200].decode("utf-8", errors="replace") + ) + logger.error( + f"[PROTOCOL:{subscription_name}] Failed to decode init response: {init_preview}..." 
+ ) self.last_error[subscription_name] = f"Invalid JSON in init response: {e}" break # Handle connection acknowledgment if init_data.get("type") == "connection_ack": - logger.info(f"[PROTOCOL:{subscription_name}] Connection acknowledged successfully") + logger.info( + f"[PROTOCOL:{subscription_name}] Connection acknowledged successfully" + ) self.connection_states[subscription_name] = "authenticated" elif init_data.get("type") == "connection_error": error_payload = init_data.get("payload", {}) - logger.error(f"[AUTH:{subscription_name}] Authentication failed: {error_payload}") - self.last_error[subscription_name] = f"Authentication error: {error_payload}" + logger.error( + f"[AUTH:{subscription_name}] Authentication failed: {error_payload}" + ) + self.last_error[subscription_name] = ( + f"Authentication error: {error_payload}" + ) self.connection_states[subscription_name] = "auth_failed" break else: - logger.warning(f"[PROTOCOL:{subscription_name}] Unexpected init response: {init_data}") + logger.warning( + f"[PROTOCOL:{subscription_name}] Unexpected init response: {init_data}" + ) # Continue anyway - some servers send other messages first # Start the subscription - logger.debug(f"[SUBSCRIPTION:{subscription_name}] Starting GraphQL subscription...") - start_type = "subscribe" if selected_proto == "graphql-transport-ws" else "start" + logger.debug( + f"[SUBSCRIPTION:{subscription_name}] Starting GraphQL subscription..." 
+ ) + start_type = ( + "subscribe" if selected_proto == "graphql-transport-ws" else "start" + ) subscription_message = { "id": subscription_name, "type": start_type, - "payload": { - "query": query, - "variables": variables - } + "payload": {"query": query, "variables": variables}, } - logger.debug(f"[SUBSCRIPTION:{subscription_name}] Subscription message type: {start_type}") + logger.debug( + f"[SUBSCRIPTION:{subscription_name}] Subscription message type: {start_type}" + ) logger.debug(f"[SUBSCRIPTION:{subscription_name}] Query: {query[:100]}...") logger.debug(f"[SUBSCRIPTION:{subscription_name}] Variables: {variables}") await websocket.send(json.dumps(subscription_message)) - logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription started successfully") + logger.info( + f"[SUBSCRIPTION:{subscription_name}] Subscription started successfully" + ) self.connection_states[subscription_name] = "subscribed" # Listen for subscription data @@ -253,57 +307,100 @@ class SubscriptionManager: message_count += 1 message_type = data.get("type", "unknown") - logger.debug(f"[DATA:{subscription_name}] Message #{message_count}: {message_type}") + logger.debug( + f"[DATA:{subscription_name}] Message #{message_count}: {message_type}" + ) # Handle different message types - expected_data_type = "next" if selected_proto == "graphql-transport-ws" else "data" + expected_data_type = ( + "next" if selected_proto == "graphql-transport-ws" else "data" + ) - if data.get("type") == expected_data_type and data.get("id") == subscription_name: + if ( + data.get("type") == expected_data_type + and data.get("id") == subscription_name + ): payload = data.get("payload", {}) if payload.get("data"): - logger.info(f"[DATA:{subscription_name}] Received subscription data update") + logger.info( + f"[DATA:{subscription_name}] Received subscription data update" + ) self.resource_data[subscription_name] = SubscriptionData( data=payload["data"], last_updated=datetime.now(), - 
subscription_type=subscription_name + subscription_type=subscription_name, + ) + logger.debug( + f"[RESOURCE:{subscription_name}] Resource data updated successfully" ) - logger.debug(f"[RESOURCE:{subscription_name}] Resource data updated successfully") elif payload.get("errors"): - logger.error(f"[DATA:{subscription_name}] GraphQL errors in response: {payload['errors']}") - self.last_error[subscription_name] = f"GraphQL errors: {payload['errors']}" + logger.error( + f"[DATA:{subscription_name}] GraphQL errors in response: {payload['errors']}" + ) + self.last_error[subscription_name] = ( + f"GraphQL errors: {payload['errors']}" + ) else: - logger.warning(f"[DATA:{subscription_name}] Empty or invalid data payload: {payload}") + logger.warning( + f"[DATA:{subscription_name}] Empty or invalid data payload: {payload}" + ) elif data.get("type") == "ping": - logger.debug(f"[PROTOCOL:{subscription_name}] Received ping, sending pong") + logger.debug( + f"[PROTOCOL:{subscription_name}] Received ping, sending pong" + ) await websocket.send(json.dumps({"type": "pong"})) elif data.get("type") == "error": error_payload = data.get("payload", {}) - logger.error(f"[SUBSCRIPTION:{subscription_name}] Subscription error: {error_payload}") - self.last_error[subscription_name] = f"Subscription error: {error_payload}" + logger.error( + f"[SUBSCRIPTION:{subscription_name}] Subscription error: {error_payload}" + ) + self.last_error[subscription_name] = ( + f"Subscription error: {error_payload}" + ) self.connection_states[subscription_name] = "error" elif data.get("type") == "complete": - logger.info(f"[SUBSCRIPTION:{subscription_name}] Subscription completed by server") + logger.info( + f"[SUBSCRIPTION:{subscription_name}] Subscription completed by server" + ) self.connection_states[subscription_name] = "completed" break elif data.get("type") in ["ka", "ping", "pong"]: - logger.debug(f"[PROTOCOL:{subscription_name}] Keepalive message: {message_type}") + logger.debug( + 
f"[PROTOCOL:{subscription_name}] Keepalive message: {message_type}" + ) else: - logger.debug(f"[PROTOCOL:{subscription_name}] Unhandled message type: {message_type}") + logger.debug( + f"[PROTOCOL:{subscription_name}] Unhandled message type: {message_type}" + ) except json.JSONDecodeError as e: - msg_preview = message[:200] if isinstance(message, str) else message[:200].decode("utf-8", errors="replace") - logger.error(f"[PROTOCOL:{subscription_name}] Failed to decode message: {msg_preview}...") + msg_preview = ( + message[:200] + if isinstance(message, str) + else message[:200].decode("utf-8", errors="replace") + ) + logger.error( + f"[PROTOCOL:{subscription_name}] Failed to decode message: {msg_preview}..." + ) logger.error(f"[PROTOCOL:{subscription_name}] JSON decode error: {e}") except Exception as e: - logger.error(f"[DATA:{subscription_name}] Error processing message: {e}") - msg_preview = message[:200] if isinstance(message, str) else message[:200].decode("utf-8", errors="replace") - logger.debug(f"[DATA:{subscription_name}] Raw message: {msg_preview}...") + logger.error( + f"[DATA:{subscription_name}] Error processing message: {e}" + ) + msg_preview = ( + message[:200] + if isinstance(message, str) + else message[:200].decode("utf-8", errors="replace") + ) + logger.debug( + f"[DATA:{subscription_name}] Raw message: {msg_preview}..." + ) except TimeoutError: error_msg = "Connection or authentication timeout" @@ -332,7 +429,9 @@ class SubscriptionManager: # Calculate backoff delay retry_delay = min(retry_delay * 1.5, max_retry_delay) - logger.info(f"[WEBSOCKET:{subscription_name}] Reconnecting in {retry_delay:.1f} seconds...") + logger.info( + f"[WEBSOCKET:{subscription_name}] Reconnecting in {retry_delay:.1f} seconds..." 
+ ) self.connection_states[subscription_name] = "reconnecting" await asyncio.sleep(retry_delay) @@ -363,14 +462,14 @@ class SubscriptionManager: "config": { "resource": config["resource"], "description": config["description"], - "auto_start": config.get("auto_start", False) + "auto_start": config.get("auto_start", False), }, "runtime": { "active": sub_name in self.active_subscriptions, "connection_state": self.connection_states.get(sub_name, "not_started"), "reconnect_attempts": self.reconnect_attempts.get(sub_name, 0), - "last_error": self.last_error.get(sub_name, None) - } + "last_error": self.last_error.get(sub_name, None), + }, } # Add data info if available @@ -380,7 +479,7 @@ class SubscriptionManager: sub_status["data"] = { "available": True, "last_updated": data_info.last_updated.isoformat(), - "age_seconds": age_seconds + "age_seconds": age_seconds, } else: sub_status["data"] = {"available": False} diff --git a/unraid_mcp/subscriptions/resources.py b/unraid_mcp/subscriptions/resources.py index 27fba9a..b6a6842 100644 --- a/unraid_mcp/subscriptions/resources.py +++ b/unraid_mcp/subscriptions/resources.py @@ -59,7 +59,9 @@ async def autostart_subscriptions() -> None: logger.info(f"[AUTOSTART] Starting log file subscription for: {log_path}") config = subscription_manager.subscription_configs.get("logFileSubscription") if config: - await subscription_manager.start_subscription("logFileSubscription", str(config["query"]), {"path": log_path}) + await subscription_manager.start_subscription( + "logFileSubscription", str(config["query"]), {"path": log_path} + ) logger.info(f"[AUTOSTART] Log file subscription started for: {log_path}") else: logger.error("[AUTOSTART] logFileSubscription config not found") @@ -83,9 +85,11 @@ def register_subscription_resources(mcp: FastMCP) -> None: data = subscription_manager.get_resource_data("logFileSubscription") if data: return json.dumps(data, indent=2) - return json.dumps({ - "status": "No subscription data yet", - "message": 
"Subscriptions auto-start on server boot. If this persists, check server logs for WebSocket/auth issues." - }) + return json.dumps( + { + "status": "No subscription data yet", + "message": "Subscriptions auto-start on server boot. If this persists, check server logs for WebSocket/auth issues.", + } + ) logger.info("Subscription resources registered successfully") diff --git a/unraid_mcp/tools/array.py b/unraid_mcp/tools/array.py index 05a306e..5cf132f 100644 --- a/unraid_mcp/tools/array.py +++ b/unraid_mcp/tools/array.py @@ -1,7 +1,6 @@ -"""Array operations and system power management. +"""Array parity check operations. -Provides the `unraid_array` tool with 12 actions for array lifecycle, -parity operations, disk management, and system power control. +Provides the `unraid_array` tool with 5 actions for parity check management. """ from typing import Any, Literal @@ -22,16 +21,6 @@ QUERIES: dict[str, str] = { } MUTATIONS: dict[str, str] = { - "start": """ - mutation StartArray { - setState(input: { desiredState: STARTED }) { state } - } - """, - "stop": """ - mutation StopArray { - setState(input: { desiredState: STOPPED }) { state } - } - """, "parity_start": """ mutation StartParityCheck($correct: Boolean) { parityCheck { start(correct: $correct) } @@ -52,42 +41,16 @@ MUTATIONS: dict[str, str] = { parityCheck { cancel } } """, - "mount_disk": """ - mutation MountDisk($id: PrefixedID!) { - mountArrayDisk(id: $id) - } - """, - "unmount_disk": """ - mutation UnmountDisk($id: PrefixedID!) { - unmountArrayDisk(id: $id) - } - """, - "clear_stats": """ - mutation ClearStats($id: PrefixedID!) 
{ - clearArrayDiskStatistics(id: $id) - } - """, - "shutdown": """ - mutation Shutdown { - shutdown - } - """, - "reboot": """ - mutation Reboot { - reboot - } - """, } -DESTRUCTIVE_ACTIONS = {"start", "stop", "shutdown", "reboot"} -DISK_ACTIONS = {"mount_disk", "unmount_disk", "clear_stats"} ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) ARRAY_ACTIONS = Literal[ - "start", "stop", - "parity_start", "parity_pause", "parity_resume", "parity_cancel", "parity_status", - "mount_disk", "unmount_disk", "clear_stats", - "shutdown", "reboot", + "parity_start", + "parity_pause", + "parity_resume", + "parity_cancel", + "parity_status", ] @@ -97,52 +60,31 @@ def register_array_tool(mcp: FastMCP) -> None: @mcp.tool() async def unraid_array( action: ARRAY_ACTIONS, - confirm: bool = False, - disk_id: str | None = None, correct: bool | None = None, ) -> dict[str, Any]: - """Manage the Unraid array and system power. + """Manage Unraid array parity checks. Actions: - start - Start the array (destructive, requires confirm=True) - stop - Stop the array (destructive, requires confirm=True) parity_start - Start parity check (optional correct=True to fix errors) parity_pause - Pause running parity check parity_resume - Resume paused parity check parity_cancel - Cancel running parity check parity_status - Get current parity check status - mount_disk - Mount an array disk (requires disk_id) - unmount_disk - Unmount an array disk (requires disk_id) - clear_stats - Clear disk statistics (requires disk_id) - shutdown - Shut down the server (destructive, requires confirm=True) - reboot - Reboot the server (destructive, requires confirm=True) """ if action not in ALL_ACTIONS: raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(ALL_ACTIONS)}") - if action in DESTRUCTIVE_ACTIONS and not confirm: - raise ToolError( - f"Action '{action}' is destructive. Set confirm=True to proceed." 
- ) - - if action in DISK_ACTIONS and not disk_id: - raise ToolError(f"disk_id is required for '{action}' action") - try: logger.info(f"Executing unraid_array action={action}") - # Read-only query if action in QUERIES: data = await make_graphql_request(QUERIES[action]) return {"success": True, "action": action, "data": data} - # Mutations query = MUTATIONS[action] variables: dict[str, Any] | None = None - if action in DISK_ACTIONS: - variables = {"id": disk_id} - elif action == "parity_start" and correct is not None: + if action == "parity_start" and correct is not None: variables = {"correct": correct} data = await make_graphql_request(query, variables) diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index d514345..bd9d4ad 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -99,13 +99,35 @@ MUTATIONS: dict[str, str] = { } DESTRUCTIVE_ACTIONS = {"remove"} -_ACTIONS_REQUIRING_CONTAINER_ID = {"start", "stop", "restart", "pause", "unpause", "remove", "update", "details", "logs"} +_ACTIONS_REQUIRING_CONTAINER_ID = { + "start", + "stop", + "restart", + "pause", + "unpause", + "remove", + "update", + "details", + "logs", +} ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) | {"restart"} DOCKER_ACTIONS = Literal[ - "list", "details", "start", "stop", "restart", "pause", "unpause", - "remove", "update", "update_all", "logs", - "networks", "network_details", "port_conflicts", "check_updates", + "list", + "details", + "start", + "stop", + "restart", + "pause", + "unpause", + "remove", + "update", + "update_all", + "logs", + "networks", + "network_details", + "port_conflicts", + "check_updates", ] # Docker container IDs: 64 hex chars + optional suffix (e.g., ":local") @@ -246,9 +268,7 @@ def register_docker_tool(mcp: FastMCP) -> None: return {"networks": list(networks) if isinstance(networks, list) else []} if action == "network_details": - data = await make_graphql_request( - QUERIES["network_details"], {"id": network_id} - ) + data = 
await make_graphql_request(QUERIES["network_details"], {"id": network_id}) return dict(data.get("dockerNetwork", {})) if action == "port_conflicts": @@ -266,13 +286,15 @@ def register_docker_tool(mcp: FastMCP) -> None: actual_id = await _resolve_container_id(container_id or "") # Stop (idempotent: treat "already stopped" as success) stop_data = await make_graphql_request( - MUTATIONS["stop"], {"id": actual_id}, + MUTATIONS["stop"], + {"id": actual_id}, operation_context={"operation": "stop"}, ) stop_was_idempotent = stop_data.get("idempotent_success", False) # Start (idempotent: treat "already running" as success) start_data = await make_graphql_request( - MUTATIONS["start"], {"id": actual_id}, + MUTATIONS["start"], + {"id": actual_id}, operation_context={"operation": "start"}, ) if start_data.get("idempotent_success"): @@ -280,7 +302,9 @@ def register_docker_tool(mcp: FastMCP) -> None: else: result = start_data.get("docker", {}).get("start", {}) response: dict[str, Any] = { - "success": True, "action": "restart", "container": result, + "success": True, + "action": "restart", + "container": result, } if stop_was_idempotent: response["note"] = "Container was already stopped before restart" @@ -294,9 +318,12 @@ def register_docker_tool(mcp: FastMCP) -> None: # Single-container mutations if action in MUTATIONS: actual_id = await _resolve_container_id(container_id or "") - op_context: dict[str, str] | None = {"operation": action} if action in ("start", "stop") else None + op_context: dict[str, str] | None = ( + {"operation": action} if action in ("start", "stop") else None + ) data = await make_graphql_request( - MUTATIONS[action], {"id": actual_id}, + MUTATIONS[action], + {"id": actual_id}, operation_context=op_context, ) diff --git a/unraid_mcp/tools/health.py b/unraid_mcp/tools/health.py index b86ba6a..32f5629 100644 --- a/unraid_mcp/tools/health.py +++ b/unraid_mcp/tools/health.py @@ -247,11 +247,13 @@ async def _diagnose_subscriptions() -> dict[str, Any]: if 
conn_state in ("error", "auth_failed", "timeout", "max_retries_exceeded"): diagnostic_info["summary"]["in_error_state"] += 1 if runtime.get("last_error"): - connection_issues.append({ - "subscription": sub_name, - "state": conn_state, - "error": runtime["last_error"], - }) + connection_issues.append( + { + "subscription": sub_name, + "state": conn_state, + "error": runtime["last_error"], + } + ) return diagnostic_info diff --git a/unraid_mcp/tools/info.py b/unraid_mcp/tools/info.py index 4d2356d..cdefcb3 100644 --- a/unraid_mcp/tools/info.py +++ b/unraid_mcp/tools/info.py @@ -157,10 +157,25 @@ QUERIES: dict[str, str] = { } INFO_ACTIONS = Literal[ - "overview", "array", "network", "registration", "connect", "variables", - "metrics", "services", "display", "config", "online", "owner", - "settings", "server", "servers", "flash", - "ups_devices", "ups_device", "ups_config", + "overview", + "array", + "network", + "registration", + "connect", + "variables", + "metrics", + "services", + "display", + "config", + "online", + "owner", + "settings", + "server", + "servers", + "flash", + "ups_devices", + "ups_device", + "ups_config", ] assert set(QUERIES.keys()) == set(INFO_ACTIONS.__args__), ( @@ -209,7 +224,15 @@ def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]: def _analyze_disk_health(disks: list[dict[str, Any]]) -> dict[str, int]: """Analyze health status of disk arrays.""" - counts = {"healthy": 0, "failed": 0, "missing": 0, "new": 0, "warning": 0, "critical": 0, "unknown": 0} + counts = { + "healthy": 0, + "failed": 0, + "missing": 0, + "new": 0, + "warning": 0, + "critical": 0, + "unknown": 0, + } for disk in disks: status = disk.get("status", "").upper() warning = disk.get("warning") @@ -263,7 +286,11 @@ def _process_array_status(raw: dict[str, Any]) -> dict[str, Any]: summary["num_cache_pools"] = len(raw.get("caches", [])) health_summary: dict[str, Any] = {} - for key, label in [("parities", "parity_health"), ("disks", "data_health"), ("caches", 
"cache_health")]: + for key, label in [ + ("parities", "parity_health"), + ("disks", "data_health"), + ("caches", "cache_health"), + ]: if raw.get(key): health_summary[label] = _analyze_disk_health(raw[key]) @@ -377,10 +404,14 @@ def register_info_tool(mcp: FastMCP) -> None: if action == "settings": settings = data.get("settings") or {} if not settings: - raise ToolError("No settings data returned from Unraid API. Check API permissions.") + raise ToolError( + "No settings data returned from Unraid API. Check API permissions." + ) if not settings.get("unified"): logger.warning(f"Settings returned unexpected structure: {settings.keys()}") - raise ToolError(f"Unexpected settings structure. Expected 'unified' key, got: {list(settings.keys())}") + raise ToolError( + f"Unexpected settings structure. Expected 'unified' key, got: {list(settings.keys())}" + ) values = settings["unified"].get("values") or {} return dict(values) if isinstance(values, dict) else {"raw": values} diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index f6fbf69..b444ab0 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -47,7 +47,11 @@ MUTATIONS: dict[str, str] = { DESTRUCTIVE_ACTIONS = {"delete"} KEY_ACTIONS = Literal[ - "list", "get", "create", "update", "delete", + "list", + "get", + "create", + "update", + "delete", ] @@ -101,9 +105,7 @@ def register_keys_tool(mcp: FastMCP) -> None: input_data["roles"] = roles if permissions: input_data["permissions"] = permissions - data = await make_graphql_request( - MUTATIONS["create"], {"input": input_data} - ) + data = await make_graphql_request(MUTATIONS["create"], {"input": input_data}) return { "success": True, "key": data.get("createApiKey", {}), @@ -117,9 +119,7 @@ def register_keys_tool(mcp: FastMCP) -> None: input_data["name"] = name if roles: input_data["roles"] = roles - data = await make_graphql_request( - MUTATIONS["update"], {"input": input_data} - ) + data = await make_graphql_request(MUTATIONS["update"], 
{"input": input_data}) return { "success": True, "key": data.get("updateApiKey", {}), @@ -128,12 +128,12 @@ def register_keys_tool(mcp: FastMCP) -> None: if action == "delete": if not key_id: raise ToolError("key_id is required for 'delete' action") - data = await make_graphql_request( - MUTATIONS["delete"], {"input": {"ids": [key_id]}} - ) + data = await make_graphql_request(MUTATIONS["delete"], {"input": {"ids": [key_id]}}) result = data.get("deleteApiKeys") if not result: - raise ToolError(f"Failed to delete API key '{key_id}': no confirmation from server") + raise ToolError( + f"Failed to delete API key '{key_id}': no confirmation from server" + ) return { "success": True, "message": f"API key '{key_id}' deleted", diff --git a/unraid_mcp/tools/notifications.py b/unraid_mcp/tools/notifications.py index 00927d7..635d01a 100644 --- a/unraid_mcp/tools/notifications.py +++ b/unraid_mcp/tools/notifications.py @@ -78,8 +78,15 @@ MUTATIONS: dict[str, str] = { DESTRUCTIVE_ACTIONS = {"delete", "delete_archived"} NOTIFICATION_ACTIONS = Literal[ - "overview", "list", "warnings", - "create", "archive", "unread", "delete", "delete_archived", "archive_all", + "overview", + "list", + "warnings", + "create", + "archive", + "unread", + "delete", + "delete_archived", + "archive_all", ] @@ -115,7 +122,9 @@ def register_notifications_tool(mcp: FastMCP) -> None: """ all_actions = {**QUERIES, **MUTATIONS} if action not in all_actions: - raise ToolError(f"Invalid action '{action}'. Must be one of: {list(all_actions.keys())}") + raise ToolError( + f"Invalid action '{action}'. Must be one of: {list(all_actions.keys())}" + ) if action in DESTRUCTIVE_ACTIONS and not confirm: raise ToolError(f"Action '{action}' is destructive. 
Set confirm=True to proceed.") @@ -136,9 +145,7 @@ def register_notifications_tool(mcp: FastMCP) -> None: } if importance: filter_vars["importance"] = importance.upper() - data = await make_graphql_request( - QUERIES["list"], {"filter": filter_vars} - ) + data = await make_graphql_request(QUERIES["list"], {"filter": filter_vars}) notifications = data.get("notifications", {}) result = notifications.get("list", []) return {"notifications": list(result) if isinstance(result, list) else []} @@ -151,33 +158,25 @@ def register_notifications_tool(mcp: FastMCP) -> None: if action == "create": if title is None or subject is None or description is None or importance is None: - raise ToolError( - "create requires title, subject, description, and importance" - ) + raise ToolError("create requires title, subject, description, and importance") input_data = { "title": title, "subject": subject, "description": description, "importance": importance.upper(), } - data = await make_graphql_request( - MUTATIONS["create"], {"input": input_data} - ) + data = await make_graphql_request(MUTATIONS["create"], {"input": input_data}) return {"success": True, "data": data} if action in ("archive", "unread"): if not notification_id: raise ToolError(f"notification_id is required for '{action}' action") - data = await make_graphql_request( - MUTATIONS[action], {"id": notification_id} - ) + data = await make_graphql_request(MUTATIONS[action], {"id": notification_id}) return {"success": True, "action": action, "data": data} if action == "delete": if not notification_id or not notification_type: - raise ToolError( - "delete requires notification_id and notification_type" - ) + raise ToolError("delete requires notification_id and notification_type") data = await make_graphql_request( MUTATIONS["delete"], {"id": notification_id, "type": notification_type.upper()}, diff --git a/unraid_mcp/tools/rclone.py b/unraid_mcp/tools/rclone.py index fe61fc3..1a496aa 100644 --- a/unraid_mcp/tools/rclone.py +++ 
b/unraid_mcp/tools/rclone.py @@ -43,7 +43,10 @@ DESTRUCTIVE_ACTIONS = {"delete_remote"} ALL_ACTIONS = set(QUERIES) | set(MUTATIONS) RCLONE_ACTIONS = Literal[ - "list_remotes", "config_form", "create_remote", "delete_remote", + "list_remotes", + "config_form", + "create_remote", + "delete_remote", ] @@ -84,9 +87,7 @@ def register_rclone_tool(mcp: FastMCP) -> None: variables: dict[str, Any] = {} if provider_type: variables["formOptions"] = {"providerType": provider_type} - data = await make_graphql_request( - QUERIES["config_form"], variables or None - ) + data = await make_graphql_request(QUERIES["config_form"], variables or None) form = data.get("rclone", {}).get("configForm", {}) if not form: raise ToolError("No RClone config form data received") @@ -94,16 +95,16 @@ def register_rclone_tool(mcp: FastMCP) -> None: if action == "create_remote": if name is None or provider_type is None or config_data is None: - raise ToolError( - "create_remote requires name, provider_type, and config_data" - ) + raise ToolError("create_remote requires name, provider_type, and config_data") data = await make_graphql_request( MUTATIONS["create_remote"], {"input": {"name": name, "type": provider_type, "config": config_data}}, ) remote = data.get("rclone", {}).get("createRCloneRemote") if not remote: - raise ToolError(f"Failed to create remote '{name}': no confirmation from server") + raise ToolError( + f"Failed to create remote '{name}': no confirmation from server" + ) return { "success": True, "message": f"Remote '{name}' created successfully", diff --git a/unraid_mcp/tools/storage.py b/unraid_mcp/tools/storage.py index 4641d31..e5d938f 100644 --- a/unraid_mcp/tools/storage.py +++ b/unraid_mcp/tools/storage.py @@ -57,7 +57,12 @@ QUERIES: dict[str, str] = { } STORAGE_ACTIONS = Literal[ - "shares", "disks", "disk_details", "unassigned", "log_files", "logs", + "shares", + "disks", + "disk_details", + "unassigned", + "log_files", + "logs", ] diff --git a/unraid_mcp/tools/users.py 
b/unraid_mcp/tools/users.py index 06c0282..2d9edab 100644 --- a/unraid_mcp/tools/users.py +++ b/unraid_mcp/tools/users.py @@ -1,7 +1,7 @@ -"""User management. +"""User account query. -Provides the `unraid_users` tool with 8 actions for managing users, -cloud access, remote access settings, and allowed origins. +Provides the `unraid_users` tool with 1 action for querying the current authenticated user. +Note: Unraid GraphQL API does not support user management operations (list, add, delete). """ from typing import Any, Literal @@ -19,146 +19,37 @@ QUERIES: dict[str, str] = { me { id name description roles } } """, - "list": """ - query ListUsers { - users { id name description roles } - } - """, - "get": """ - query GetUser($id: ID!) { - user(id: $id) { id name description roles } - } - """, - "cloud": """ - query GetCloud { - cloud { status error } - } - """, - "remote_access": """ - query GetRemoteAccess { - remoteAccess { enabled url } - } - """, - "origins": """ - query GetAllowedOrigins { - allowedOrigins - } - """, } -MUTATIONS: dict[str, str] = { - "add": """ - mutation AddUser($input: addUserInput!) { - addUser(input: $input) { id name description roles } - } - """, - "delete": """ - mutation DeleteUser($input: deleteUserInput!) { - deleteUser(input: $input) { id name } - } - """, -} +ALL_ACTIONS = set(QUERIES) -DESTRUCTIVE_ACTIONS = {"delete"} - -USER_ACTIONS = Literal[ - "me", "list", "get", "add", "delete", "cloud", "remote_access", "origins", -] +USER_ACTIONS = Literal["me"] def register_users_tool(mcp: FastMCP) -> None: """Register the unraid_users tool with the FastMCP instance.""" @mcp.tool() - async def unraid_users( - action: USER_ACTIONS, - confirm: bool = False, - user_id: str | None = None, - name: str | None = None, - password: str | None = None, - role: str | None = None, - ) -> dict[str, Any]: - """Manage Unraid users and access settings. 
+ async def unraid_users(action: USER_ACTIONS = "me") -> dict[str, Any]: + """Query current authenticated user. Actions: - me - Get current authenticated user info - list - List all users - get - Get a specific user (requires user_id) - add - Add a new user (requires name, password; optional role) - delete - Delete a user (requires user_id, confirm=True) - cloud - Get Unraid Connect cloud status - remote_access - Get remote access settings - origins - Get allowed origins - """ - all_actions = set(QUERIES) | set(MUTATIONS) - if action not in all_actions: - raise ToolError(f"Invalid action '{action}'. Must be one of: {sorted(all_actions)}") + me - Get current authenticated user info (id, name, description, roles) - if action in DESTRUCTIVE_ACTIONS and not confirm: - raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") + Note: Unraid API does not support user management operations (list, add, delete). + """ + if action not in ALL_ACTIONS: + raise ToolError(f"Invalid action '{action}'. 
Must be: me") try: - logger.info(f"Executing unraid_users action={action}") - - if action == "me": - data = await make_graphql_request(QUERIES["me"]) - return data.get("me") or {} - - if action == "list": - data = await make_graphql_request(QUERIES["list"]) - users = data.get("users", []) - return {"users": list(users) if isinstance(users, list) else []} - - if action == "get": - if not user_id: - raise ToolError("user_id is required for 'get' action") - data = await make_graphql_request(QUERIES["get"], {"id": user_id}) - return data.get("user") or {} - - if action == "add": - if not name or not password: - raise ToolError("add requires name and password") - input_data: dict[str, Any] = {"name": name, "password": password} - if role: - input_data["role"] = role.upper() - data = await make_graphql_request( - MUTATIONS["add"], {"input": input_data} - ) - return { - "success": True, - "user": data.get("addUser", {}), - } - - if action == "delete": - if not user_id: - raise ToolError("user_id is required for 'delete' action") - data = await make_graphql_request( - MUTATIONS["delete"], {"input": {"id": user_id}} - ) - return { - "success": True, - "message": f"User '{user_id}' deleted", - } - - if action == "cloud": - data = await make_graphql_request(QUERIES["cloud"]) - return data.get("cloud") or {} - - if action == "remote_access": - data = await make_graphql_request(QUERIES["remote_access"]) - return data.get("remoteAccess") or {} - - if action == "origins": - data = await make_graphql_request(QUERIES["origins"]) - origins = data.get("allowedOrigins", []) - return {"origins": list(origins) if isinstance(origins, list) else []} - - raise ToolError(f"Unhandled action '{action}' — this is a bug") + logger.info("Executing unraid_users action=me") + data = await make_graphql_request(QUERIES["me"]) + return data.get("me") or {} except ToolError: raise except Exception as e: - logger.error(f"Error in unraid_users action={action}: {e}", exc_info=True) - raise 
ToolError(f"Failed to execute users/{action}: {e!s}") from e + logger.error(f"Error in unraid_users action=me: {e}", exc_info=True) + raise ToolError(f"Failed to execute users/me: {e!s}") from e logger.info("Users tool registered successfully") diff --git a/unraid_mcp/tools/virtualization.py b/unraid_mcp/tools/virtualization.py index 074ab3a..d6ccf9a 100644 --- a/unraid_mcp/tools/virtualization.py +++ b/unraid_mcp/tools/virtualization.py @@ -53,8 +53,15 @@ _MUTATION_FIELDS: dict[str, str] = { DESTRUCTIVE_ACTIONS = {"force_stop", "reset"} VM_ACTIONS = Literal[ - "list", "details", - "start", "stop", "pause", "resume", "force_stop", "reboot", "reset", + "list", + "details", + "start", + "stop", + "pause", + "resume", + "force_stop", + "reboot", + "reset", ] @@ -111,21 +118,15 @@ def register_vm_tool(mcp: FastMCP) -> None: or vm.get("name") == vm_id ): return dict(vm) - available = [ - f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms - ] - raise ToolError( - f"VM '{vm_id}' not found. Available: {', '.join(available)}" - ) + available = [f"{v.get('name')} (UUID: {v.get('uuid')})" for v in vms] + raise ToolError(f"VM '{vm_id}' not found. 
Available: {', '.join(available)}") if action == "details": raise ToolError("No VM data returned from server") return {"vms": []} # Mutations if action in MUTATIONS: - data = await make_graphql_request( - MUTATIONS[action], {"id": vm_id} - ) + data = await make_graphql_request(MUTATIONS[action], {"id": vm_id}) field = _MUTATION_FIELDS.get(action, action) if data.get("vm") and field in data["vm"]: return { diff --git a/uv.lock b/uv.lock index 6e32b71..cc0a3e7 100644 --- a/uv.lock +++ b/uv.lock @@ -422,6 +422,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e2/c7/562ff39f25de27caec01e4c1e88cbb5fcae5160802ba3d90be33165df24f/fastmcp-2.12.4-py3-none-any.whl", hash = "sha256:56188fbbc1a9df58c537063f25958c57b5c4d715f73e395c41b51550b247d140", size = 329090, upload-time = "2025-09-26T16:43:25.314Z" }, ] +[[package]] +name = "graphql-core" +version = "3.2.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ac/9b/037a640a2983b09aed4a823f9cf1729e6d780b0671f854efa4727a7affbe/graphql_core-3.2.7.tar.gz", hash = "sha256:27b6904bdd3b43f2a0556dad5d579bdfdeab1f38e8e8788e555bdcb586a6f62c", size = 513484, upload-time = "2025-11-01T22:30:40.436Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/14/933037032608787fb92e365883ad6a741c235e0ff992865ec5d904a38f1e/graphql_core-3.2.7-py3-none-any.whl", hash = "sha256:17fc8f3ca4a42913d8e24d9ac9f08deddf0a0b2483076575757f6c412ead2ec0", size = 207262, upload-time = "2025-11-01T22:30:38.912Z" }, +] + [[package]] name = "h11" version = "0.16.0" @@ -1222,6 +1231,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, ] +[[package]] +name = "respx" +version = "0.22.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/7c/96bd0bc759cf009675ad1ee1f96535edcb11e9666b985717eb8c87192a95/respx-0.22.0.tar.gz", hash = "sha256:3c8924caa2a50bd71aefc07aa812f2466ff489f1848c96e954a5362d17095d91", size = 28439, upload-time = "2024-12-19T22:33:59.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/67/afbb0978d5399bc9ea200f1d4489a23c9a1dad4eee6376242b8182389c79/respx-0.22.0-py2.py3-none-any.whl", hash = "sha256:631128d4c9aba15e56903fb5f66fb1eff412ce28dd387ca3a81339e52dbd3ad0", size = 25127, upload-time = "2024-12-19T22:33:57.837Z" }, +] + [[package]] name = "rfc3339-validator" version = "0.1.4" @@ -1524,9 +1545,11 @@ dependencies = [ [package.dev-dependencies] dev = [ { name = "build" }, + { name = "graphql-core" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, + { name = "respx" }, { name = "ruff" }, { name = "twine" }, { name = "ty" }, @@ -1548,9 +1571,11 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ { name = "build", specifier = ">=1.2.2" }, + { name = "graphql-core", specifier = ">=3.2.0" }, { name = "pytest", specifier = ">=8.4.2" }, { name = "pytest-asyncio", specifier = ">=1.2.0" }, { name = "pytest-cov", specifier = ">=7.0.0" }, + { name = "respx", specifier = ">=0.22.0" }, { name = "ruff", specifier = ">=0.12.8" }, { name = "twine", specifier = ">=6.0.1" }, { name = "ty", specifier = ">=0.0.15" },