From 184b8aca1c1eeb73ea246b7e3613f00e265eec1a Mon Sep 17 00:00:00 2001 From: Jacob Magar Date: Sun, 15 Feb 2026 16:42:58 -0500 Subject: [PATCH] fix: address 18 CRITICAL+HIGH PR review comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Critical Fixes (7 issues):** - Fix GraphQL schema field names in users tool (role→roles, remove email) - Fix GraphQL mutation signatures (addUserInput, deleteUser input) - Fix dict(None) TypeError guards in users tool (use `or {}` pattern) - Fix FastAPI version constraint (0.116.1→0.115.0) - Fix WebSocket SSL context handling (support CA bundles, bool, and None) - Fix critical disk threshold treated as warning (split counters) **High Priority Fixes (11 issues):** - Fix Docker update/remove action response field mapping - Fix path traversal vulnerability in log validation (normalize paths) - Fix deleteApiKeys validation (check response before success) - Fix rclone create_remote validation (check response) - Fix keys input_data type annotation (dict[str, Any]) - Fix VM domain/domains fallback restoration **Changes by file:** - unraid_mcp/tools/docker.py: Response field mapping - unraid_mcp/tools/info.py: Split critical/warning counters - unraid_mcp/tools/storage.py: Path normalization for traversal protection - unraid_mcp/tools/users.py: GraphQL schema + null handling - unraid_mcp/tools/keys.py: Validation + type annotations - unraid_mcp/tools/rclone.py: Response validation - unraid_mcp/tools/virtualization.py: Domain fallback - unraid_mcp/subscriptions/manager.py: SSL context creation - pyproject.toml: FastAPI version fix - tests/*: New tests for all fixes **Review threads resolved:** PRRT_kwDOO6Hdxs5uu70L, PRRT_kwDOO6Hdxs5uu70O, PRRT_kwDOO6Hdxs5uu70V, PRRT_kwDOO6Hdxs5uu70e, PRRT_kwDOO6Hdxs5uu70i, PRRT_kwDOO6Hdxs5uu7zn, PRRT_kwDOO6Hdxs5uu7z_, PRRT_kwDOO6Hdxs5uu7sI, PRRT_kwDOO6Hdxs5uu7sJ, PRRT_kwDOO6Hdxs5uu7sK, PRRT_kwDOO6Hdxs5uu7Tk, PRRT_kwDOO6Hdxs5uu7Tn, PRRT_kwDOO6Hdxs5uu7Tr, 
PRRT_kwDOO6Hdxs5uu7Ts, PRRT_kwDOO6Hdxs5uu7Tu, PRRT_kwDOO6Hdxs5uu7Tv, PRRT_kwDOO6Hdxs5uu7Tw, PRRT_kwDOO6Hdxs5uu7Tx All tests passing. Co-authored-by: docker-fixer Co-authored-by: info-fixer Co-authored-by: storage-fixer Co-authored-by: users-fixer Co-authored-by: config-fixer Co-authored-by: websocket-fixer Co-authored-by: keys-rclone-fixer Co-authored-by: vm-fixer --- .claude-plugin/README.md | 70 + .claude-plugin/marketplace.json | 22 + .gitignore | 3 + .plan.md | 544 +++ MARKETPLACE.md | 203 ++ README.md | 23 + dev.sh | 556 --- pyproject.toml | 2 +- scripts/validate-marketplace.sh | 80 + skills/unraid/.claude-plugin/plugin.json | 27 + skills/unraid/README.md | 149 + skills/unraid/SKILL.md | 210 ++ skills/unraid/examples/disk-health.sh | 23 + skills/unraid/examples/read-logs.sh | 23 + skills/unraid/references/api-reference.md | 946 +++++ skills/unraid/references/endpoints.md | 49 + .../unraid/references/introspection-schema.md | 3114 +++++++++++++++++ skills/unraid/references/quick-reference.md | 219 ++ skills/unraid/references/schema.graphql | 3114 +++++++++++++++++ skills/unraid/references/troubleshooting.md | 34 + skills/unraid/scripts/dashboard.sh | 214 ++ skills/unraid/scripts/unraid-query.sh | 126 + skills/unraid/setup.sh | 39 + tests/test_info.py | 24 + tests/test_storage.py | 9 + tests/test_users.py | 40 +- unraid_mcp/subscriptions/manager.py | 13 +- unraid_mcp/tools/docker.py | 8 +- unraid_mcp/tools/info.py | 12 +- unraid_mcp/tools/keys.py | 5 +- unraid_mcp/tools/rclone.py | 4 +- unraid_mcp/tools/storage.py | 6 +- unraid_mcp/tools/users.py | 26 +- unraid_mcp/tools/virtualization.py | 9 +- uv.lock | 2 +- 35 files changed, 9360 insertions(+), 588 deletions(-) create mode 100644 .claude-plugin/README.md create mode 100644 .claude-plugin/marketplace.json create mode 100644 .plan.md create mode 100644 MARKETPLACE.md delete mode 100755 dev.sh create mode 100755 scripts/validate-marketplace.sh create mode 100644 skills/unraid/.claude-plugin/plugin.json create 
mode 100644 skills/unraid/README.md create mode 100644 skills/unraid/SKILL.md create mode 100755 skills/unraid/examples/disk-health.sh create mode 100755 skills/unraid/examples/read-logs.sh create mode 100644 skills/unraid/references/api-reference.md create mode 100644 skills/unraid/references/endpoints.md create mode 100644 skills/unraid/references/introspection-schema.md create mode 100644 skills/unraid/references/quick-reference.md create mode 100644 skills/unraid/references/schema.graphql create mode 100644 skills/unraid/references/troubleshooting.md create mode 100755 skills/unraid/scripts/dashboard.sh create mode 100755 skills/unraid/scripts/unraid-query.sh create mode 100755 skills/unraid/setup.sh diff --git a/.claude-plugin/README.md b/.claude-plugin/README.md new file mode 100644 index 0000000..006ee8b --- /dev/null +++ b/.claude-plugin/README.md @@ -0,0 +1,70 @@ +# Unraid MCP Marketplace + +This directory contains the Claude Code marketplace configuration for the Unraid MCP server and skills. + +## Installation + +### From GitHub (Recommended) + +```bash +# Add the marketplace +/plugin marketplace add jmagar/unraid-mcp + +# Install the Unraid skill +/plugin install unraid @unraid-mcp +``` + +### From Local Path (Development) + +```bash +# Add local marketplace +/plugin marketplace add /path/to/unraid-mcp + +# Install the plugin +/plugin install unraid @unraid-mcp +``` + +## Available Plugins + +### unraid +Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring. 
+ +**Features:** +- 27 read-only API endpoints +- Real-time system metrics +- Disk health and temperature monitoring +- Docker container management +- VM status and control +- Log file access +- Network share information +- Notification management + +**Version:** 1.1.0 +**Category:** Infrastructure +**Tags:** unraid, monitoring, homelab, graphql, docker, virtualization + +## Configuration + +After installation, configure your Unraid server credentials: + +```bash +export UNRAID_URL="https://your-unraid-server/graphql" +export UNRAID_API_KEY="your-api-key" +``` + +**Getting an API Key:** +1. Open Unraid WebUI +2. Go to Settings → Management Access → API Keys +3. Click "Create" and select "Viewer" role +4. Copy the generated API key + +## Documentation + +- **Plugin Documentation:** See `skills/unraid/README.md` +- **MCP Server Documentation:** See root `README.md` +- **API Reference:** See `skills/unraid/references/` + +## Support + +- **Issues:** https://github.com/jmagar/unraid-mcp/issues +- **Repository:** https://github.com/jmagar/unraid-mcp diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json new file mode 100644 index 0000000..4093bb0 --- /dev/null +++ b/.claude-plugin/marketplace.json @@ -0,0 +1,22 @@ +{ + "name": "unraid-mcp", + "description": "Comprehensive Unraid server management and monitoring tools via GraphQL API", + "version": "1.0.0", + "owner": { + "name": "jmagar", + "email": "jmagar@users.noreply.github.com", + "url": "https://github.com/jmagar" + }, + "homepage": "https://github.com/jmagar/unraid-mcp", + "repository": "https://github.com/jmagar/unraid-mcp", + "plugins": [ + { + "name": "unraid", + "source": "./skills/unraid", + "description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring", + "version": "1.1.0", + "tags": ["unraid", "monitoring", "homelab", "graphql", "docker", "virtualization"], + "category": "infrastructure" + } + ] +} diff --git 
a/.gitignore b/.gitignore index 4621270..122508e 100644 --- a/.gitignore +++ b/.gitignore @@ -37,6 +37,9 @@ logs/ docs/plans/ docs/sessions/ +# Test planning documents +DESTRUCTIVE_ACTIONS.md + # Google OAuth client secrets client_secret_*.apps.googleusercontent.com.json diff --git a/.plan.md b/.plan.md new file mode 100644 index 0000000..826e8a5 --- /dev/null +++ b/.plan.md @@ -0,0 +1,544 @@ +# Implementation Plan: mcporter Integration Tests + Destructive Action Gating + +**Date:** 2026-02-15 +**Status:** Awaiting Approval +**Estimated Effort:** 8-12 hours + +## Overview + +Implement comprehensive integration testing using mcporter CLI to validate all 86 tool actions (after removing 4 destructive array operations) against live Unraid servers, plus add environment variable gates for remaining destructive actions to prevent accidental operations. + +## Requirements + +1. **Remove destructive array operations** - start, stop, shutdown, reboot should not be exposed via MCP +2. **Add per-tool environment variable gates** - UNRAID_ALLOW_*_DESTRUCTIVE flags for remaining destructive actions +3. **Build mcporter test suite** - Real end-to-end testing of all 86 actions against live servers (tootie/shart) +4. **Document all actions** - Comprehensive action catalog with test specifications + +## Architecture Changes + +### 1. 
Settings Infrastructure (Pydantic-based) + +**File:** `unraid_mcp/config/settings.py` + +- Migrate from simple `os.getenv()` to Pydantic `BaseSettings` +- Add 7 destructive action gate flags (all default to False for safety): + - `allow_docker_destructive` (docker remove) + - `allow_vm_destructive` (vm force_stop, reset) + - `allow_notifications_destructive` (delete, delete_archived) + - `allow_rclone_destructive` (delete_remote) + - `allow_users_destructive` (user delete) + - `allow_keys_destructive` (key delete) + - `allow_array_destructive` (REMOVED - no longer needed after task 1) +- Add `get_config_summary()` method showing gate status +- Maintain backwards compatibility via module-level exports + +**Dependencies:** Add `pydantic-settings` to `pyproject.toml` + +### 2. Tool Implementation Pattern + +**Pattern for all tools with destructive actions:** + +```python +from ..config.settings import settings + +# In tool function: +if action in DESTRUCTIVE_ACTIONS: + # Check 1: Environment variable gate (first line of defense) + if not settings.allow_{tool}_destructive: + raise ToolError( + f"Destructive {tool} action '{action}' is disabled. " + f"Set UNRAID_ALLOW_{TOOL}_DESTRUCTIVE=true to enable. " + f"This is a safety gate to prevent accidental operations." + ) + + # Check 2: Runtime confirmation (second line of defense) + if not confirm: + raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") +``` + +**Tools requiring updates:** +- `unraid_mcp/tools/docker.py` (1 action: remove) +- `unraid_mcp/tools/virtualization.py` (2 actions: force_stop, reset) +- `unraid_mcp/tools/notifications.py` (2 actions: delete, delete_archived) +- `unraid_mcp/tools/rclone.py` (1 action: delete_remote) +- `unraid_mcp/tools/users.py` (1 action: delete) +- `unraid_mcp/tools/keys.py` (1 action: delete) + +### 3. 
mcporter Integration Test Suite + +**New Directory Structure:** + +``` +tests/integration/ +├── helpers/ +│ ├── mcporter.sh # mcporter wrapper (call_tool, call_destructive, get_field) +│ ├── validation.sh # Response validation (assert_fields, assert_equals, assert_success) +│ └── reporting.sh # Test reporting (init_report, record_test, generate_summary) +├── tools/ +│ ├── test_health.sh # 3 actions +│ ├── test_info.sh # 19 actions +│ ├── test_storage.sh # 6 actions +│ ├── test_docker.sh # 15 actions +│ ├── test_vm.sh # 9 actions +│ ├── test_notifications.sh # 9 actions +│ ├── test_rclone.sh # 4 actions +│ ├── test_users.sh # 8 actions +│ ├── test_keys.sh # 5 actions +│ └── test_array.sh # 8 actions (after removal) +├── run-all.sh # Master test runner (parallel/sequential) +├── run-tool.sh # Single tool runner +└── README.md # Integration test documentation +``` + +**mcporter Configuration:** `config/mcporter.json` + +```json +{ + "mcpServers": { + "unraid-tootie": { + "command": "uv", + "args": ["run", "unraid-mcp-server"], + "env": { + "UNRAID_API_URL": "https://myunraid.net:31337/graphql", + "UNRAID_API_KEY": "${UNRAID_TOOTIE_API_KEY}", + "UNRAID_VERIFY_SSL": "false", + "UNRAID_MCP_TRANSPORT": "stdio" + }, + "cwd": "/home/jmagar/workspace/unraid-mcp" + }, + "unraid-shart": { + "command": "uv", + "args": ["run", "unraid-mcp-server"], + "env": { + "UNRAID_API_URL": "http://100.118.209.1/graphql", + "UNRAID_API_KEY": "${UNRAID_SHART_API_KEY}", + "UNRAID_VERIFY_SSL": "false", + "UNRAID_MCP_TRANSPORT": "stdio" + }, + "cwd": "/home/jmagar/workspace/unraid-mcp" + } + } +} +``` + +## Implementation Tasks + +### Task 1: Remove Destructive Array Operations + +**Files:** +- `unraid_mcp/tools/array.py` +- `tests/test_array.py` + +**Changes:** +1. Remove from `MUTATIONS` dict: + - `start` (lines 24-28) + - `stop` (lines 29-33) + - `shutdown` (lines 69-73) + - `reboot` (lines 74-78) +2. Remove from `DESTRUCTIVE_ACTIONS` set (line 81) - set becomes empty `{}` +3. 
Remove from `ARRAY_ACTIONS` Literal type (lines 85-86) +4. Update docstring removing these 4 actions (lines 105-106, 115-116) +5. Remove tests for these actions in `tests/test_array.py` + +**Acceptance:** +- ✅ Array tool has 8 actions (down from 12) +- ✅ `DESTRUCTIVE_ACTIONS` is empty set +- ✅ Tests pass for remaining actions +- ✅ Removed mutations are not callable + +### Task 2: Add Pydantic Settings with Destructive Gates + +**Files:** +- `unraid_mcp/config/settings.py` +- `pyproject.toml` +- `.env.example` + +**Changes:** + +1. **Add dependency:** `pydantic-settings>=2.12` in `pyproject.toml` dependencies + +2. **Update settings.py:** + - Import `BaseSettings` from `pydantic_settings` + - Create `UnraidSettings` class with all config fields + - Add 6 destructive gate fields (all default to False): + - `allow_docker_destructive: bool = Field(default=False, ...)` + - `allow_vm_destructive: bool = Field(default=False, ...)` + - `allow_notifications_destructive: bool = Field(default=False, ...)` + - `allow_rclone_destructive: bool = Field(default=False, ...)` + - `allow_users_destructive: bool = Field(default=False, ...)` + - `allow_keys_destructive: bool = Field(default=False, ...)` + - Add `get_config_summary()` method including gate status + - Instantiate global `settings = UnraidSettings()` + - Keep backwards compatibility exports + +3. **Update .env.example:** Add section documenting all destructive gates + +**Acceptance:** +- ✅ `settings` instance loads successfully +- ✅ All gate fields default to False +- ✅ `get_config_summary()` shows gate status +- ✅ Backwards compatibility maintained (existing code still works) + +### Task 3: Update Tools with Environment Variable Gates + +**Files to update:** +- `unraid_mcp/tools/docker.py` +- `unraid_mcp/tools/virtualization.py` +- `unraid_mcp/tools/notifications.py` +- `unraid_mcp/tools/rclone.py` +- `unraid_mcp/tools/users.py` +- `unraid_mcp/tools/keys.py` + +**Pattern for each tool:** + +1. 
Add import: `from ..config.settings import settings` +2. Add gate check before confirm check in destructive action handler: + ```python + if action in DESTRUCTIVE_ACTIONS: + if not settings.allow_{tool}_destructive: + raise ToolError( + f"Destructive {tool} action '{action}' is disabled. " + f"Set UNRAID_ALLOW_{TOOL}_DESTRUCTIVE=true to enable." + ) + if not confirm: + raise ToolError(f"Action '{action}' is destructive. Set confirm=True to proceed.") + ``` +3. Update tool docstring documenting security requirements + +**Acceptance (per tool):** +- ✅ Destructive action fails with clear error when env var not set +- ✅ Destructive action still requires confirm=True when env var is set +- ✅ Both checks must pass for execution +- ✅ Error messages guide user to correct env var + +### Task 4: Update Test Suite with Settings Mocking + +**Files:** +- `tests/conftest.py` +- `tests/test_docker.py` +- `tests/test_vm.py` +- `tests/test_notifications.py` +- `tests/test_rclone.py` +- `tests/test_users.py` +- `tests/test_keys.py` + +**Changes:** + +1. **Add fixtures to conftest.py:** + ```python + @pytest.fixture + def mock_settings(): + # All gates disabled + + @pytest.fixture + def mock_settings_all_enabled(mock_settings): + # All gates enabled + ``` + +2. **Update each test file:** + - Add `mock_settings` parameter to fixtures + - Wrap tool calls with `with patch("unraid_mcp.tools.{tool}.settings", mock_settings):` + - Add 3 destructive action tests: + - Test gate check (env var not set, confirm=True → fails) + - Test confirm check (env var set, confirm=False → fails) + - Test success (env var set, confirm=True → succeeds) + +**Acceptance:** +- ✅ All 150 existing tests pass +- ✅ New gate tests cover all destructive actions +- ✅ Tests verify correct error messages +- ✅ Tests use mocked settings (don't rely on actual env vars) + +### Task 5: Create mcporter Configuration + +**Files:** +- `config/mcporter.json` (new) +- `tests/integration/README.md` (new) + +**Changes:** + +1. 
Create `config/mcporter.json` with tootie and shart server configs +2. Document how to use mcporter with the server in README +3. Include instructions for loading credentials from `~/workspace/homelab/.env` + +**Acceptance:** +- ✅ `mcporter list unraid-tootie` shows all tools +- ✅ `mcporter call unraid-tootie.unraid_health action=test_connection` succeeds +- ✅ Configuration works for both servers + +### Task 6: Build mcporter Helper Libraries + +**Files to create:** +- `tests/integration/helpers/mcporter.sh` +- `tests/integration/helpers/validation.sh` +- `tests/integration/helpers/reporting.sh` + +**Functions to implement:** + +**mcporter.sh:** +- `call_tool [params...]` - Call tool via mcporter, return JSON +- `call_destructive [params...]` - Safe destructive call +- `get_field ` - Extract field from JSON +- `is_success ` - Check if response indicates success +- `get_error ` - Extract error message + +**validation.sh:** +- `assert_fields ...` - Verify required fields exist +- `assert_equals ` - Field value equality +- `assert_matches ` - Field matches regex +- `assert_success ` - Response indicates success +- `assert_failure [pattern]` - Response indicates failure (negative test) + +**reporting.sh:** +- `init_report ` - Initialize JSON report file +- `record_test [error]` - Record test result +- `generate_summary` - Generate console summary from all reports + +**Acceptance:** +- ✅ Helper functions work correctly +- ✅ Error handling is robust +- ✅ Functions are reusable across all tool tests + +### Task 7: Implement Tool Test Scripts + +**Files to create:** +- `tests/integration/tools/test_health.sh` (3 actions) +- `tests/integration/tools/test_info.sh` (19 actions) +- `tests/integration/tools/test_storage.sh` (6 actions) +- `tests/integration/tools/test_docker.sh` (15 actions) +- `tests/integration/tools/test_vm.sh` (9 actions) +- `tests/integration/tools/test_notifications.sh` (9 actions) +- `tests/integration/tools/test_rclone.sh` (4 actions) +- 
`tests/integration/tools/test_users.sh` (8 actions) +- `tests/integration/tools/test_keys.sh` (5 actions) +- `tests/integration/tools/test_array.sh` (8 actions) + +**Per-script implementation:** + +1. Source helper libraries +2. Initialize report +3. Implement test functions for each action: + - Basic functionality test + - Response structure validation + - Parameter validation + - Destructive action gate tests (if applicable) +4. Run all tests and record results +5. Return exit code based on failures + +**Priority order (implement in this sequence):** +1. `test_health.sh` - Simplest (3 actions, no destructive) +2. `test_info.sh` - Large but straightforward (19 query actions) +3. `test_storage.sh` - Moderate (6 query actions) +4. `test_docker.sh` - Complex (15 actions, 1 destructive) +5. `test_vm.sh` - Complex (9 actions, 2 destructive) +6. `test_notifications.sh` - Moderate (9 actions, 2 destructive) +7. `test_rclone.sh` - Simple (4 actions, 1 destructive) +8. `test_users.sh` - Moderate (8 actions, 1 destructive) +9. `test_keys.sh` - Simple (5 actions, 1 destructive) +10. 
`test_array.sh` - Moderate (8 actions, no destructive after removal) + +**Acceptance:** +- ✅ Each script tests all actions for its tool +- ✅ Tests validate response structure +- ✅ Destructive action gates are tested +- ✅ Scripts generate JSON reports +- ✅ Exit code indicates success/failure + +### Task 8: Build Test Runners + +**Files to create:** +- `tests/integration/run-all.sh` +- `tests/integration/run-tool.sh` + +**run-all.sh features:** +- Load credentials from `~/workspace/homelab/.env` +- Support sequential and parallel execution modes +- Run all 10 tool test scripts +- Generate summary report +- Return exit code based on any failures + +**run-tool.sh features:** +- Accept tool name as argument +- Load credentials +- Execute single tool test script +- Pass through exit code + +**Acceptance:** +- ✅ `run-all.sh` executes all tool tests +- ✅ Parallel mode works correctly (no race conditions) +- ✅ Summary report shows pass/fail/skip counts +- ✅ `run-tool.sh health` runs only health tests +- ✅ Exit codes are correct + +### Task 9: Document Action Catalog + +**File to create:** +- `docs/testing/action-catalog.md` + +**Content:** +- Table of all 86 actions across 10 tools +- For each action: + - Tool name + - Action name + - Type (query/mutation/compound) + - Required parameters + - Optional parameters + - Destructive? 
(yes/no + env var if yes) + - Expected response structure + - Example mcporter call + - Validation criteria + +**Acceptance:** +- ✅ All 86 actions documented +- ✅ Specifications are detailed and accurate +- ✅ Examples are runnable +- ✅ Becomes source of truth for test implementation + +### Task 10: Integration Documentation + +**Files to create/update:** +- `tests/integration/README.md` +- `docs/testing/integration-tests.md` +- `docs/testing/test-environments.md` +- `README.md` (add integration test section) + +**Content:** +- How to run integration tests +- How to configure mcporter +- Server setup (tootie/shart) +- Environment variable gates +- Destructive action testing +- CI/CD integration +- Troubleshooting + +**Acceptance:** +- ✅ Clear setup instructions +- ✅ Examples for common use cases +- ✅ Integration with existing pytest docs +- ✅ CI/CD pipeline documented + +## Testing Strategy + +### Unit Tests (pytest - existing) +- **150 tests** across 10 tool modules +- Mock GraphQL responses +- Fast, isolated, offline +- Cover edge cases and error paths + +### Integration Tests (mcporter - new) +- **86 tests** (one per action) +- Real Unraid server calls +- Slow, dependent, online +- Validate actual API behavior + +### Test Matrix + +| Tool | Actions | pytest Tests | mcporter Tests | Destructive | +|------|---------|--------------|----------------|-------------| +| health | 3 | 10 | 3 | 0 | +| info | 19 | 98 | 19 | 0 | +| storage | 6 | 11 | 6 | 0 | +| docker | 15 | 28 | 15 | 1 | +| vm | 9 | 25 | 9 | 2 | +| notifications | 9 | 7 | 9 | 2 | +| rclone | 4 | (pending) | 4 | 1 | +| users | 8 | (pending) | 8 | 1 | +| keys | 5 | (pending) | 5 | 1 | +| array | 8 | 26 | 8 | 0 | +| **TOTAL** | **86** | **~150** | **86** | **8** | + +## Validation Checklist + +### Code Changes +- [ ] Array tool has 8 actions (removed start/stop/shutdown/reboot) +- [ ] Settings class with 6 destructive gate flags +- [ ] All 6 tools updated with environment variable gates +- [ ] All 6 tool tests 
updated with gate test cases +- [ ] All existing 150 pytest tests pass +- [ ] `pydantic-settings` added to dependencies +- [ ] `.env.example` updated with gate documentation + +### Integration Tests +- [ ] mcporter configuration works for both servers +- [ ] All 3 helper libraries implemented +- [ ] All 10 tool test scripts implemented +- [ ] Test runners (run-all, run-tool) work correctly +- [ ] All 86 actions have test coverage +- [ ] Destructive action gates are tested +- [ ] Reports generate correctly + +### Documentation +- [ ] Action catalog documents all 86 actions +- [ ] Integration test README is clear +- [ ] Environment setup documented +- [ ] CI/CD integration documented +- [ ] Project README updated + +## Success Criteria + +1. **Safety:** Destructive actions require both env var AND confirm=True +2. **Coverage:** All 86 actions have integration tests +3. **Quality:** Clear error messages guide users to correct env vars +4. **Automation:** Test suite runs via single command +5. 
**Documentation:** Complete action catalog and testing guide + +## Risks & Mitigations + +### Risk: Breaking existing deployments +**Impact:** HIGH - Users suddenly can't execute destructive actions +**Mitigation:** +- Clear error messages with exact env var to set +- Document migration in release notes +- Default to disabled (safe) but guide users to enable + +### Risk: Integration tests are flaky +**Impact:** MEDIUM - CI/CD unreliable +**Mitigation:** +- Test against stable servers (tootie/shart) +- Implement retry logic for network errors +- Skip destructive tests if env vars not set (not failures) + +### Risk: mcporter configuration complexity +**Impact:** LOW - Difficult for contributors to run tests +**Mitigation:** +- Clear setup documentation +- Example .env template +- Helper script to validate setup + +## Dependencies + +- `pydantic-settings>=2.12` (Python package) +- `mcporter` (npm package - user must install) +- `jq` (system package for JSON parsing in bash) +- Access to tootie/shart servers (for integration tests) +- Credentials in `~/workspace/homelab/.env` + +## Timeline Estimate + +| Task | Estimated Time | +|------|---------------| +| 1. Remove array ops | 30 min | +| 2. Add settings infrastructure | 1 hour | +| 3. Update tools with gates | 2 hours | +| 4. Update test suite | 2 hours | +| 5. mcporter config | 30 min | +| 6. Helper libraries | 1.5 hours | +| 7. Tool test scripts | 4 hours | +| 8. Test runners | 1 hour | +| 9. Action catalog | 2 hours | +| 10. 
Documentation | 1.5 hours | +| **Total** | **~12 hours** | + +## Notes + +- Integration tests complement (not replace) existing pytest suite +- Tests validate actual Unraid API behavior, not just our code +- Environment variable gates provide defense-in-depth security +- mcporter enables real-world validation impossible with mocked tests +- Action catalog becomes living documentation for all tools + +--- + +**Plan Status:** Awaiting user approval +**Next Step:** Review plan, make adjustments, then execute via task list diff --git a/MARKETPLACE.md b/MARKETPLACE.md new file mode 100644 index 0000000..5d38ed6 --- /dev/null +++ b/MARKETPLACE.md @@ -0,0 +1,203 @@ +# Claude Code Marketplace Setup + +This document explains the Claude Code marketplace and plugin structure for the Unraid MCP project. + +## What Was Created + +### 1. Marketplace Manifest (`.claude-plugin/marketplace.json`) +The marketplace catalog that lists all available plugins in this repository. + +**Location:** `.claude-plugin/marketplace.json` + +**Contents:** +- Marketplace metadata (name, version, owner, repository) +- Plugin catalog with the "unraid" skill +- Categories and tags for discoverability + +### 2. Plugin Manifest (`skills/unraid/.claude-plugin/plugin.json`) +The individual plugin configuration for the Unraid skill. + +**Location:** `skills/unraid/.claude-plugin/plugin.json` + +**Contents:** +- Plugin name, version, author +- Repository and homepage links +- Plugin-specific metadata + +### 3. Documentation +- `.claude-plugin/README.md` - Marketplace installation guide +- Updated root `README.md` with plugin installation section + +### 4. 
Validation Script +- `scripts/validate-marketplace.sh` - Automated validation of marketplace structure + +## Installation Methods + +### Method 1: GitHub Distribution (Recommended for Users) + +Once you push this to GitHub, users can install via: + +```bash +# Add your marketplace +/plugin marketplace add jmagar/unraid-mcp + +# Install the Unraid skill +/plugin install unraid @unraid-mcp +``` + +### Method 2: Local Installation (Development) + +For testing locally before publishing: + +```bash +# Add local marketplace +/plugin marketplace add /home/jmagar/workspace/unraid-mcp + +# Install the plugin +/plugin install unraid @unraid-mcp +``` + +### Method 3: Direct URL + +Users can also install from a specific commit or branch: + +```bash +# From specific branch +/plugin marketplace add jmagar/unraid-mcp#main + +# From specific commit +/plugin marketplace add jmagar/unraid-mcp#abc123 +``` + +## Plugin Structure + +``` +unraid-mcp/ +├── .claude-plugin/ # Marketplace manifest +│ ├── marketplace.json +│ └── README.md +├── skills/unraid/ # Plugin directory +│ ├── .claude-plugin/ # Plugin manifest +│ │ └── plugin.json +│ ├── SKILL.md # Skill documentation +│ ├── README.md # Plugin documentation +│ ├── examples/ # Example scripts +│ ├── scripts/ # Helper scripts +│ └── references/ # API reference docs +└── scripts/ + └── validate-marketplace.sh # Validation tool +``` + +## Marketplace Metadata + +### Categories +- `infrastructure` - Server management and monitoring tools + +### Tags +- `unraid` - Unraid-specific functionality +- `monitoring` - System monitoring capabilities +- `homelab` - Homelab automation +- `graphql` - GraphQL API integration +- `docker` - Docker container management +- `virtualization` - VM management + +## Publishing Checklist + +Before publishing to GitHub: + +1. **Validate Structure** + ```bash + ./scripts/validate-marketplace.sh + ``` + +2. 
**Update Version Numbers** + - Bump version in `.claude-plugin/marketplace.json` + - Bump version in `skills/unraid/.claude-plugin/plugin.json` + - Update version in `README.md` if needed + +3. **Test Locally** + ```bash + /plugin marketplace add . + /plugin install unraid @unraid-mcp + ``` + +4. **Commit and Push** + ```bash + git add .claude-plugin/ skills/unraid/.claude-plugin/ + git commit -m "feat: add Claude Code marketplace configuration" + git push origin main + ``` + +5. **Create Release Tag** (Optional) + ```bash + git tag -a v1.0.0 -m "Release v1.0.0" + git push origin v1.0.0 + ``` + +## User Experience + +After installation, users will: + +1. **See the skill in their skill list** + ```bash + /skill list + ``` + +2. **Access Unraid functionality directly** + - Claude Code will automatically detect when to invoke the skill + - Users can explicitly invoke with `/unraid` + +3. **Have access to all helper scripts** + - Example scripts in `examples/` + - Utility scripts in `scripts/` + - API reference in `references/` + +## Maintenance + +### Updating the Plugin + +To release a new version: + +1. Make changes to the plugin +2. Update version in `skills/unraid/.claude-plugin/plugin.json` +3. Update marketplace catalog in `.claude-plugin/marketplace.json` +4. Run validation: `./scripts/validate-marketplace.sh` +5. Commit and push + +Users with the plugin installed will see the update available and can upgrade with: +```bash +/plugin update unraid +``` + +### Adding More Plugins + +To add additional plugins to this marketplace: + +1. Create new plugin directory: `skills/new-plugin/` +2. Add plugin manifest: `skills/new-plugin/.claude-plugin/plugin.json` +3. Update marketplace catalog: add entry to `.plugins[]` array in `.claude-plugin/marketplace.json` +4. 
Validate: `./scripts/validate-marketplace.sh` + +## Support + +- **Repository:** https://github.com/jmagar/unraid-mcp +- **Issues:** https://github.com/jmagar/unraid-mcp/issues +- **Documentation:** See `.claude-plugin/README.md` and `skills/unraid/README.md` + +## Validation + +Run the validation script anytime to ensure marketplace integrity: + +```bash +./scripts/validate-marketplace.sh +``` + +This checks: +- Manifest file existence and validity +- JSON syntax +- Required fields +- Plugin structure +- Source path accuracy +- Documentation completeness + +All 17 checks must pass before publishing. diff --git a/README.md b/README.md index ca13b4f..1fcd17b 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,7 @@ ## 📋 Table of Contents +- [Claude Code Plugin](#-claude-code-plugin) - [Quick Start](#-quick-start) - [Installation](#-installation) - [Configuration](#-configuration) @@ -31,6 +32,28 @@ --- +## 🎯 Claude Code Plugin + +**The easiest way to use Unraid MCP is through the Claude Code marketplace:** + +```bash +# Add the marketplace +/plugin marketplace add jmagar/unraid-mcp + +# Install the Unraid skill +/plugin install unraid @unraid-mcp +``` + +This provides instant access to Unraid monitoring and management through Claude Code with: +- 27 GraphQL API endpoints +- Real-time system metrics +- Disk health monitoring +- Docker and VM management + +**See [.claude-plugin/README.md](.claude-plugin/README.md) for detailed plugin documentation.** + +--- + ## 🚀 Quick Start ### Prerequisites diff --git a/dev.sh b/dev.sh deleted file mode 100755 index 6ee4856..0000000 --- a/dev.sh +++ /dev/null @@ -1,556 +0,0 @@ -#!/bin/bash - -# Unraid MCP Server Development Script -# Safely manages server processes during development with accurate process detection - -set -euo pipefail - -# Configuration -DEFAULT_PORT=6970 -PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -LOG_DIR="/tmp" -LOG_FILE="$LOG_DIR/unraid-mcp.log" -PID_FILE="$LOG_DIR/dev.pid" -# Ensure logs 
directory exists -mkdir -p "$LOG_DIR" - -# All colors are now handled by Rich logging system - -# Helper function for unified Rich logging -log() { - local message="$1" - local level="${2:-info}" - local indent="${3:-0}" - local file_timestamp="$(date +'%Y-%m-%d %H:%M:%S')" - - # Use unified Rich logger for beautiful console output - escape single quotes - local escaped_message="${message//\'/\'\"\'\"\'}" - uv run python -c "from unraid_mcp.config.logging import log_with_level_and_indent; log_with_level_and_indent('$escaped_message', '$level', $indent)" - - # File output without color - printf "[%s] %s\n" "$file_timestamp" "$message" >> "$LOG_FILE" -} - -# Convenience functions for different log levels -log_error() { log "$1" "error" "${2:-0}"; } -log_warning() { log "$1" "warning" "${2:-0}"; } -log_success() { log "$1" "success" "${2:-0}"; } -log_info() { log "$1" "info" "${2:-0}"; } -log_status() { log "$1" "status" "${2:-0}"; } - -# Rich header function -log_header() { - uv run python -c "from unraid_mcp.config.logging import log_header; log_header('$1')" -} - -# Rich separator function -log_separator() { - uv run python -c "from unraid_mcp.config.logging import log_separator; log_separator()" -} - -# Get port from environment or use default -get_port() { - local port="${UNRAID_MCP_PORT:-$DEFAULT_PORT}" - echo "$port" -} - -# Write PID to file -write_pid_file() { - local pid=$1 - echo "$pid" > "$PID_FILE" -} - -# Read PID from file -read_pid_file() { - if [[ -f "$PID_FILE" ]]; then - cat "$PID_FILE" 2>/dev/null - fi -} - -# Check if PID file contains valid running process -is_pid_valid() { - local pid=$1 - [[ -n "$pid" ]] && [[ "$pid" =~ ^[0-9]+$ ]] && kill -0 "$pid" 2>/dev/null -} - -# Clean up PID file -cleanup_pid_file() { - if [[ -f "$PID_FILE" ]]; then - rm -f "$PID_FILE" - log_info "🗑️ Cleaned up PID file" - fi -} - -# Get PID from PID file if valid, otherwise return empty -get_valid_pid_from_file() { - local pid=$(read_pid_file) - if is_pid_valid "$pid"; 
then - echo "$pid" - else - # Clean up stale PID file - [[ -f "$PID_FILE" ]] && cleanup_pid_file - echo "" - fi -} - -# Find processes using multiple detection methods -find_server_processes() { - local port=$(get_port) - local pids=() - - # Method 0: Check PID file first (most reliable) - local pid_from_file=$(get_valid_pid_from_file) - if [[ -n "$pid_from_file" ]]; then - log_status "🔍 Found server PID from file: $pid_from_file" - pids+=("$pid_from_file") - fi - - # Method 1: Command line pattern matching (fallback) - while IFS= read -r line; do - if [[ -n "$line" ]]; then - local pid=$(echo "$line" | awk '{print $2}') - # Add to pids if not already present - if [[ ! " ${pids[@]} " =~ " $pid " ]]; then - pids+=("$pid") - fi - fi - done < <(ps aux | grep -E 'python.*unraid.*mcp|python.*main\.py|uv run.*main\.py|uv run -m unraid_mcp' | grep -v grep | grep -v "$0") - - # Method 2: Port binding verification (fallback) - if command -v lsof >/dev/null 2>&1; then - while IFS= read -r line; do - if [[ -n "$line" ]]; then - local pid=$(echo "$line" | awk '{print $2}') - # Add to pids if not already present - if [[ ! " ${pids[@]} " =~ " $pid " ]]; then - pids+=("$pid") - fi - fi - done < <(lsof -i ":$port" 2>/dev/null | grep LISTEN || true) - fi - - # Method 3: Working directory verification for fallback methods - local verified_pids=() - for pid in "${pids[@]}"; do - # Skip if not a valid PID - if ! 
[[ "$pid" =~ ^[0-9]+$ ]]; then - continue - fi - - # If this PID came from the PID file, it's already verified - if [[ "$pid" == "$pid_from_file" ]]; then - verified_pids+=("$pid") - continue - fi - - # Verify other PIDs by working directory - if [[ -d "/proc/$pid" ]]; then - local pwd_info="" - if command -v pwdx >/dev/null 2>&1; then - pwd_info=$(pwdx "$pid" 2>/dev/null | cut -d' ' -f2- || echo "unknown") - else - pwd_info=$(readlink -f "/proc/$pid/cwd" 2>/dev/null || echo "unknown") - fi - - # Verify it's running from our project directory or a parent directory - if [[ "$pwd_info" == "$PROJECT_DIR"* ]] || [[ "$pwd_info" == *"unraid-mcp"* ]]; then - verified_pids+=("$pid") - fi - fi - done - - # Output final list - printf '%s\n' "${verified_pids[@]}" | grep -E '^[0-9]+$' || true -} - -# Terminate a process gracefully, then forcefully if needed -terminate_process() { - local pid=$1 - local name=${2:-"process"} - - if ! kill -0 "$pid" 2>/dev/null; then - log_warning "⚠️ Process $pid ($name) already terminated" - return 0 - fi - - log_warning "🔄 Terminating $name (PID: $pid)..." - - # Step 1: Graceful shutdown (SIGTERM) - log_info "→ Sending SIGTERM to PID $pid" 1 - kill -TERM "$pid" 2>/dev/null || { - log_warning "⚠️ Failed to send SIGTERM (process may have died)" 2 - return 0 - } - - # Step 2: Wait for graceful shutdown (5 seconds) - local count=0 - while [[ $count -lt 5 ]]; do - if ! kill -0 "$pid" 2>/dev/null; then - log_success "✅ Process $pid terminated gracefully" 1 - - # Clean up PID file if this was our server process - local pid_from_file=$(read_pid_file) - if [[ "$pid" == "$pid_from_file" ]]; then - cleanup_pid_file - fi - - return 0 - fi - sleep 1 - ((count++)) - log_info "⏳ Waiting for graceful shutdown... 
(${count}/5)" 2 - done - - # Step 3: Force kill (SIGKILL) - log_error "⚡ Graceful shutdown timeout, sending SIGKILL to PID $pid" 1 - kill -KILL "$pid" 2>/dev/null || { - log_warning "⚠️ Failed to send SIGKILL (process may have died)" 2 - return 0 - } - - # Step 4: Final verification - sleep 1 - if kill -0 "$pid" 2>/dev/null; then - log_error "❌ Failed to terminate process $pid" 1 - return 1 - else - log_success "✅ Process $pid terminated forcefully" 1 - - # Clean up PID file if this was our server process - local pid_from_file=$(read_pid_file) - if [[ "$pid" == "$pid_from_file" ]]; then - cleanup_pid_file - fi - - return 0 - fi -} - -# Stop all server processes -stop_servers() { - log_header "Server Shutdown" - log_error "🛑 Stopping existing server processes..." - - local pids=($(find_server_processes)) - - if [[ ${#pids[@]} -eq 0 ]]; then - log_success "✅ No processes to stop" - return 0 - fi - - local failed=0 - for pid in "${pids[@]}"; do - if ! terminate_process "$pid" "Unraid MCP Server"; then - ((failed++)) - fi - done - - # Wait for ports to be released - local port=$(get_port) - log_info "⏳ Waiting for port $port to be released..." - local port_wait=0 - while [[ $port_wait -lt 3 ]]; do - if ! lsof -i ":$port" >/dev/null 2>&1; then - log_success "✅ Port $port released" 1 - break - fi - sleep 1 - ((port_wait++)) - done - - if [[ $failed -gt 0 ]]; then - log_error "⚠️ Failed to stop $failed process(es)" - return 1 - else - log_success "✅ All processes stopped successfully" - return 0 - fi -} - -# Start the new modular server -start_modular_server() { - log_header "Modular Server Startup" - log_success "🚀 Starting modular server..." - - cd "$PROJECT_DIR" - - # Check if main.py exists in unraid_mcp/ - if [[ ! -f "unraid_mcp/main.py" ]]; then - log_error "❌ unraid_mcp/main.py not found. Make sure modular server is implemented." 
- return 1 - fi - - # Clear the log file and add a startup marker to capture fresh logs - echo "=== Server Starting at $(date) ===" > "$LOG_FILE" - - # Start server in background using module syntax - log_info "→ Executing: uv run -m unraid_mcp.main" 1 - # Start server in new process group to isolate it from parent signals - setsid nohup uv run -m unraid_mcp.main >> "$LOG_FILE" 2>&1 & - local pid=$! - - # Write PID to file - write_pid_file "$pid" - log_info "📝 Written PID $pid to file: $PID_FILE" 1 - - # Give it a moment to start and write some logs - sleep 3 - - # Check if it's still running - if kill -0 "$pid" 2>/dev/null; then - local port=$(get_port) - log_success "✅ Modular server started successfully (PID: $pid, Port: $port)" - log_info "📋 Process info: $(ps -p "$pid" -o pid,ppid,cmd --no-headers 2>/dev/null || echo 'Process info unavailable')" 1 - - # Auto-tail logs after successful start - echo "" - log_success "📄 Following server logs in real-time..." - log_info "ℹ️ Press Ctrl+C to stop following logs (server will continue running)" 1 - log_separator - echo "" - - # Set up signal handler for graceful exit from log following - trap 'handle_log_interrupt' SIGINT - - # Start tailing from beginning of the fresh log file - tail -f "$LOG_FILE" - - return 0 - else - log_error "❌ Modular server failed to start" - cleanup_pid_file - log_warning "📄 Check $LOG_FILE for error details" - return 1 - fi -} - -# Start the original server -start_original_server() { - log_header "Original Server Startup" - log_success "🚀 Starting original server..." - - cd "$PROJECT_DIR" - - # Check if original server exists - if [[ ! 
-f "unraid_mcp_server.py" ]]; then - log_error "❌ unraid_mcp_server.py not found" - return 1 - fi - - # Clear the log file and add a startup marker to capture fresh logs - echo "=== Server Starting at $(date) ===" > "$LOG_FILE" - - # Start server in background - log_info "→ Executing: uv run unraid_mcp_server.py" 1 - # Start server in new process group to isolate it from parent signals - setsid nohup uv run unraid_mcp_server.py >> "$LOG_FILE" 2>&1 & - local pid=$! - - # Write PID to file - write_pid_file "$pid" - log_info "📝 Written PID $pid to file: $PID_FILE" 1 - - # Give it a moment to start and write some logs - sleep 3 - - # Check if it's still running - if kill -0 "$pid" 2>/dev/null; then - local port=$(get_port) - log_success "✅ Original server started successfully (PID: $pid, Port: $port)" - log_info "📋 Process info: $(ps -p "$pid" -o pid,ppid,cmd --no-headers 2>/dev/null || echo 'Process info unavailable')" 1 - - # Auto-tail logs after successful start - echo "" - log_success "📄 Following server logs in real-time..." 
- log_info "ℹ️ Press Ctrl+C to stop following logs (server will continue running)" 1 - log_separator - echo "" - - # Set up signal handler for graceful exit from log following - trap 'handle_log_interrupt' SIGINT - - # Start tailing from beginning of the fresh log file - tail -f "$LOG_FILE" - - return 0 - else - log_error "❌ Original server failed to start" - cleanup_pid_file - log_warning "📄 Check $LOG_FILE for error details" - return 1 - fi -} - -# Show usage information -show_usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Development script for Unraid MCP Server" - echo "" - echo "OPTIONS:" - echo " (no args) Stop existing servers, start modular server, and tail logs" - echo " --old Stop existing servers, start original server, and tail logs" - echo " --kill Stop existing servers only (don't start new one)" - echo " --status Show status of running servers" - echo " --logs [N] Show last N lines of server logs (default: 50)" - echo " --tail Follow server logs in real-time (without restarting server)" - echo " --help, -h Show this help message" - echo "" - echo "ENVIRONMENT VARIABLES:" - echo " UNRAID_MCP_PORT Port for server (default: $DEFAULT_PORT)" - echo "" - echo "EXAMPLES:" - echo " ./dev.sh # Restart with modular server" - echo " ./dev.sh --old # Restart with original server" - echo " ./dev.sh --kill # Stop all servers" - echo " ./dev.sh --status # Check server status" - echo " ./dev.sh --logs # Show last 50 lines of logs" - echo " ./dev.sh --logs 100 # Show last 100 lines of logs" - echo " ./dev.sh --tail # Follow logs in real-time" -} - -# Show server status -show_status() { - local port=$(get_port) - log_header "Server Status" - log_status "🔍 Server Status Check" - log_info "📁 Project Directory: $PROJECT_DIR" 1 - log_info "📝 PID File: $PID_FILE" 1 - log_info "🔌 Expected Port: $port" 1 - echo "" - - # Check PID file status - local pid_from_file=$(read_pid_file) - if [[ -n "$pid_from_file" ]]; then - if is_pid_valid "$pid_from_file"; then - 
log_success "✅ PID File: Contains valid PID $pid_from_file" 1 - else - log_warning "⚠️ PID File: Contains stale PID $pid_from_file (process not running)" 1 - fi - else - log_warning "🚫 PID File: Not found or empty" 1 - fi - echo "" - - local pids=($(find_server_processes)) - - if [[ ${#pids[@]} -eq 0 ]]; then - log_warning "🟡 Status: No servers running" 1 - else - log_success "✅ Status: ${#pids[@]} server(s) running" 1 - for pid in "${pids[@]}"; do - local cmd=$(ps -p "$pid" -o cmd --no-headers 2>/dev/null || echo "Command unavailable") - local source="process scan" - if [[ "$pid" == "$pid_from_file" ]]; then - source="PID file" - fi - log_success "PID $pid ($source): $cmd" 2 - done - fi - - # Check port binding - if command -v lsof >/dev/null 2>&1; then - local port_info=$(lsof -i ":$port" 2>/dev/null | grep LISTEN || echo "") - if [[ -n "$port_info" ]]; then - log_success "Port $port: BOUND" 1 - echo "$port_info" | while IFS= read -r line; do - log_info "$line" 2 - done - else - log_warning "Port $port: FREE" 1 - fi - fi -} - -# Tail the server logs -tail_logs() { - local lines="${1:-50}" - - log_info "📄 Tailing last $lines lines from server logs..." - - if [[ ! -f "$LOG_FILE" ]]; then - log_error "❌ Log file not found: $LOG_FILE" - return 1 - fi - - echo "" - echo "=== Server Logs (last $lines lines) ===" - tail -n "$lines" "$LOG_FILE" - echo "=== End of Logs ====" - echo "" -} - -# Handle SIGINT during log following -handle_log_interrupt() { - echo "" - log_info "📄 Stopped following logs. Server continues running in background." - log_info "💡 Use './dev.sh --status' to check server status" 1 - log_info "💡 Use './dev.sh --tail' to resume following logs" 1 - exit 0 -} - -# Follow server logs in real-time -follow_logs() { - log_success "📄 Following server logs in real-time..." - log_info "ℹ️ Press Ctrl+C to stop following logs" - - if [[ ! 
-f "$LOG_FILE" ]]; then - log_error "❌ Log file not found: $LOG_FILE" - return 1 - fi - - # Set up signal handler for graceful exit - trap 'handle_log_interrupt' SIGINT - - log_separator - echo "" - tail -f "$LOG_FILE" -} - -# Main script logic -main() { - # Initialize log file - echo "=== Dev Script Started at $(date) ===" >> "$LOG_FILE" - - case "${1:-}" in - --help|-h) - show_usage - ;; - --status) - show_status - ;; - --kill) - stop_servers - ;; - --logs) - tail_logs "${2:-50}" - ;; - --tail) - follow_logs - ;; - --old) - if stop_servers; then - start_original_server - else - log_error "❌ Failed to stop existing servers" - exit 1 - fi - ;; - "") - if stop_servers; then - start_modular_server - else - log_error "❌ Failed to stop existing servers" - exit 1 - fi - ;; - *) - log_error "❌ Unknown option: $1" - show_usage - exit 1 - ;; - esac -} - -# Run main function with all arguments -main "$@" \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7d3c92f..c62c23d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,7 +75,7 @@ dependencies = [ "python-dotenv>=1.1.1", "fastmcp>=2.11.2", "httpx>=0.28.1", - "fastapi>=0.116.1", + "fastapi>=0.115.0", "uvicorn[standard]>=0.35.0", "websockets>=13.1,<14.0", "rich>=14.1.0", diff --git a/scripts/validate-marketplace.sh b/scripts/validate-marketplace.sh new file mode 100755 index 0000000..afb7549 --- /dev/null +++ b/scripts/validate-marketplace.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Validate Claude Code marketplace and plugin structure + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Counters +CHECKS=0 +PASSED=0 +FAILED=0 + +check() { + local test_name="$1" + local test_cmd="$2" + + CHECKS=$((CHECKS + 1)) + echo -n "Checking: $test_name... 
" + + if eval "$test_cmd" > /dev/null 2>&1; then + echo -e "${GREEN}✓${NC}" + PASSED=$((PASSED + 1)) + return 0 + else + echo -e "${RED}✗${NC}" + FAILED=$((FAILED + 1)) + return 1 + fi +} + +echo "=== Validating Claude Code Marketplace Structure ===" +echo "" + +# Check marketplace manifest +check "Marketplace manifest exists" "test -f .claude-plugin/marketplace.json" +check "Marketplace manifest is valid JSON" "jq empty .claude-plugin/marketplace.json" +check "Marketplace has name" "jq -e '.name' .claude-plugin/marketplace.json" +check "Marketplace has plugins array" "jq -e '.plugins | type == \"array\"' .claude-plugin/marketplace.json" + +# Check plugin manifest +check "Plugin manifest exists" "test -f skills/unraid/.claude-plugin/plugin.json" +check "Plugin manifest is valid JSON" "jq empty skills/unraid/.claude-plugin/plugin.json" +check "Plugin has name" "jq -e '.name' skills/unraid/.claude-plugin/plugin.json" +check "Plugin has version" "jq -e '.version' skills/unraid/.claude-plugin/plugin.json" + +# Check plugin structure +check "Plugin has SKILL.md" "test -f skills/unraid/SKILL.md" +check "Plugin has README.md" "test -f skills/unraid/README.md" +check "Plugin has scripts directory" "test -d skills/unraid/scripts" +check "Plugin has examples directory" "test -d skills/unraid/examples" +check "Plugin has references directory" "test -d skills/unraid/references" + +# Validate plugin is listed in marketplace +check "Plugin listed in marketplace" "jq -e '.plugins[] | select(.name == \"unraid\")' .claude-plugin/marketplace.json" + +# Check marketplace metadata +check "Marketplace has repository" "jq -e '.repository' .claude-plugin/marketplace.json" +check "Marketplace has owner" "jq -e '.owner' .claude-plugin/marketplace.json" + +# Verify source path +PLUGIN_SOURCE=$(jq -r '.plugins[] | select(.name == "unraid") | .source' .claude-plugin/marketplace.json) +check "Plugin source path is valid" "test -d \"$PLUGIN_SOURCE\"" + +echo "" +echo "=== Results ===" +echo -e 
"Total checks: $CHECKS" +echo -e "${GREEN}Passed: $PASSED${NC}" +if [ $FAILED -gt 0 ]; then + echo -e "${RED}Failed: $FAILED${NC}" + exit 1 +else + echo -e "${GREEN}All checks passed!${NC}" + echo "" + echo "Marketplace is ready for distribution at:" + echo " https://github.com/$(jq -r '.repository' .claude-plugin/marketplace.json | sed 's|https://github.com/||')" +fi diff --git a/skills/unraid/.claude-plugin/plugin.json b/skills/unraid/.claude-plugin/plugin.json new file mode 100644 index 0000000..d6bd73c --- /dev/null +++ b/skills/unraid/.claude-plugin/plugin.json @@ -0,0 +1,27 @@ +{ + "name": "unraid", + "description": "Query and monitor Unraid servers via GraphQL API - array status, disk health, containers, VMs, system monitoring", + "version": "1.1.0", + "author": { + "name": "jmagar", + "email": "jmagar@users.noreply.github.com" + }, + "homepage": "https://github.com/jmagar/unraid-mcp", + "repository": "https://github.com/jmagar/unraid-mcp", + "mcpServers": { + "unraid": { + "command": "uv", + "args": [ + "run", + "--directory", + "${CLAUDE_PLUGIN_ROOT}/../..", + "unraid-mcp-server" + ], + "env": { + "UNRAID_API_URL": "${UNRAID_API_URL}", + "UNRAID_API_KEY": "${UNRAID_API_KEY}", + "UNRAID_MCP_TRANSPORT": "stdio" + } + } + } +} diff --git a/skills/unraid/README.md b/skills/unraid/README.md new file mode 100644 index 0000000..e3495fe --- /dev/null +++ b/skills/unraid/README.md @@ -0,0 +1,149 @@ +# Unraid API Skill + +Query and monitor Unraid servers via the GraphQL API. + +## What's Included + +This skill provides complete access to all 27 read-only Unraid GraphQL API endpoints. 
+ +### Files + +``` +skills/unraid/ +├── SKILL.md # Main skill documentation +├── README.md # This file +├── scripts/ +│ ├── unraid-query.sh # GraphQL query helper script +│ └── dashboard.sh # Complete system dashboard +├── examples/ +│ ├── disk-health.sh # Disk temperature & health check +│ └── read-logs.sh # Log file reader +└── references/ + ├── api-reference.md # Complete API documentation + └── quick-reference.md # Common queries cheat sheet +``` + +## Quick Start + +1. **Set your credentials:** + ```bash + export UNRAID_URL="https://your-unraid-server/graphql" + export UNRAID_API_KEY="your-api-key" + ``` + +2. **Run a query:** + ```bash + cd skills/unraid + ./scripts/unraid-query.sh -q "{ online }" + ``` + +3. **Run examples:** + ```bash + ./scripts/dashboard.sh + ./examples/disk-health.sh + ``` + +## Triggers + +This skill activates when you mention: +- "check Unraid" +- "monitor Unraid" +- "Unraid API" +- "Unraid disk temperatures" +- "Unraid array status" +- "read Unraid logs" +- And more Unraid-related monitoring tasks + +## Features + +- **27 working endpoints** - All read-only queries documented +- **Helper script** - Easy CLI interface for GraphQL queries +- **Example scripts** - Ready-to-use monitoring scripts +- **Complete reference** - Detailed documentation with examples +- **Quick reference** - Common queries cheat sheet + +## Endpoints Covered + +### System & Monitoring +- System info (CPU, OS, hardware) +- Real-time metrics (CPU %, memory %) +- Configuration & settings +- Log files (list & read) + +### Storage +- Array status & disks +- All physical disks (including cache/USB) +- Network shares +- Parity check status + +### Virtualization +- Docker containers +- Virtual machines + +### Power & Alerts +- UPS devices +- System notifications + +### Administration +- API key management +- User & authentication +- Server registration +- UI customization + +## Requirements + +- **Unraid 7.2+** (GraphQL API) +- **API Key** with
Viewer role +- **jq** for JSON parsing (usually pre-installed) +- **curl** for HTTP requests + +## Getting an API Key + +1. Log in to Unraid WebGUI +2. Settings → Management Access → API Keys +3. Click "Create API Key" +4. Name: "monitoring" (or whatever you like) +5. Role: Select "Viewer" (read-only) +6. Copy the generated key + +## Documentation + +- **SKILL.md** - Start here for task-oriented guidance +- **references/api-reference.md** - Complete endpoint reference +- **references/quick-reference.md** - Quick query examples + +## Examples + +### System Status +```bash +./scripts/unraid-query.sh -q "{ online metrics { cpu { percentTotal } } }" +``` + +### Disk Health +```bash +./examples/disk-health.sh +``` + +### Complete Dashboard +```bash +./examples/monitoring-dashboard.sh +``` + +### Read Logs +```bash +./examples/read-logs.sh syslog 20 +``` + +## Notes + +- All sizes are in **kilobytes** +- Temperatures are in **Celsius** +- Docker container logs are **not accessible** via API (use SSH) +- Poll no faster than every **5 seconds** to avoid server load + +## Version + +- **Skill Version:** 1.0.0 +- **API Version:** Unraid 7.2 GraphQL +- **Tested:** 2026-01-21 +- **Endpoints:** 27 working read-only queries diff --git a/skills/unraid/SKILL.md b/skills/unraid/SKILL.md new file mode 100644 index 0000000..5b4a02e --- /dev/null +++ b/skills/unraid/SKILL.md @@ -0,0 +1,210 @@ +--- +name: unraid +description: "Query and monitor Unraid servers via the GraphQL API. Use when the user asks to 'check Unraid', 'monitor Unraid', 'Unraid API', 'get Unraid status', 'check disk temperatures', 'read Unraid logs', 'list Unraid shares', 'Unraid array status', 'Unraid containers', 'Unraid VMs', or mentions Unraid system monitoring, disk health, parity checks, or server status." 
+--- + +# Unraid API Skill + +**⚠️ MANDATORY SKILL INVOCATION ⚠️** + +**YOU MUST invoke this skill (NOT optional) when the user mentions ANY of these triggers:** +- "Unraid status", "disk health", "array status" +- "Unraid containers", "VMs on Unraid", "Unraid logs" +- "check Unraid", "Unraid monitoring", "server health" +- Any mention of Unraid servers or system monitoring + +**Failure to invoke this skill when triggers occur violates your operational requirements.** + +Query and monitor Unraid servers using the GraphQL API. Access all 27 read-only endpoints for system monitoring, disk health, logs, containers, VMs, and more. + +## Quick Start + +Set your Unraid server credentials: + +```bash +export UNRAID_URL="https://your-unraid-server/graphql" +export UNRAID_API_KEY="your-api-key" +``` + +**Get API Key:** Settings → Management Access → API Keys → Create (select "Viewer" role) + +Use the helper script for any query: + +```bash +./scripts/unraid-query.sh -q "{ online }" +``` + +Or run example scripts: + +```bash +./scripts/dashboard.sh # Complete multi-server dashboard +./examples/disk-health.sh # Disk temperatures & health +./examples/read-logs.sh syslog 20 # Read system logs +``` + +## Core Concepts + +### GraphQL API Structure + +Unraid 7.2+ uses GraphQL (not REST). Key differences: +- **Single endpoint:** `/graphql` for all queries +- **Request exactly what you need:** Specify fields in query +- **Strongly typed:** Use introspection to discover fields +- **No container logs:** Docker container output logs not accessible + +### Two Resources for Stats + +- **`info`** - Static hardware specs (CPU model, cores, OS version) +- **`metrics`** - Real-time usage (CPU %, memory %, current load) + +Always use `metrics` for monitoring, `info` for specifications. 
+ +## Common Tasks + +### System Monitoring + +**Check if server is online:** +```bash +./scripts/unraid-query.sh -q "{ online }" +``` + +**Get CPU and memory usage:** +```bash +./scripts/unraid-query.sh -q "{ metrics { cpu { percentTotal } memory { used total percentTotal } } }" +``` + +**Complete dashboard:** +```bash +./scripts/dashboard.sh +``` + +### Disk Management + +**Check disk health and temperatures:** +```bash +./examples/disk-health.sh +``` + +**Get array status:** +```bash +./scripts/unraid-query.sh -q "{ array { state parityCheckStatus { status progress errors } } }" +``` + +**List all physical disks (including cache/USB):** +```bash +./scripts/unraid-query.sh -q "{ disks { name } }" +``` + +### Storage Shares + +**List network shares:** +```bash +./scripts/unraid-query.sh -q "{ shares { name comment } }" +``` + +### Logs + +**List available logs:** +```bash +./scripts/unraid-query.sh -q "{ logFiles { name size modifiedAt } }" +``` + +**Read log content:** +```bash +./examples/read-logs.sh syslog 20 +``` + +### Containers & VMs + +**List Docker containers:** +```bash +./scripts/unraid-query.sh -q "{ docker { containers { names image state status } } }" +``` + +**List VMs:** +```bash +./scripts/unraid-query.sh -q "{ vms { name state cpus memory } }" +``` + +**Note:** Container output logs are NOT accessible via API. Use `docker logs` via SSH. 
+ +### Notifications + +**Get notification counts:** +```bash +./scripts/unraid-query.sh -q "{ notifications { overview { unread { info warning alert total } } } }" +``` + +## Helper Script Usage + +The `scripts/unraid-query.sh` helper supports: + +```bash +# Basic usage +./scripts/unraid-query.sh -u URL -k API_KEY -q "QUERY" + +# Use environment variables +export UNRAID_URL="https://unraid.local/graphql" +export UNRAID_API_KEY="your-key" +./scripts/unraid-query.sh -q "{ online }" + +# Format options +-f json # Raw JSON (default) +-f pretty # Pretty-printed JSON +-f raw # Just the data (no wrapper) +``` + +## Additional Resources + +### Reference Files + +For detailed documentation, consult: +- **`references/endpoints.md`** - Complete list of all 27 API endpoints +- **`references/troubleshooting.md`** - Common errors and solutions +- **`references/api-reference.md`** - Detailed field documentation + +### Helper Scripts + +- **`scripts/unraid-query.sh`** - Main GraphQL query tool +- **`scripts/dashboard.sh`** - Automated multi-server inventory reporter + +## Quick Command Reference + +```bash +# System status +./scripts/unraid-query.sh -q "{ online metrics { cpu { percentTotal } } }" + +# Disk health +./examples/disk-health.sh + +# Array status +./scripts/unraid-query.sh -q "{ array { state } }" + +# Read logs +./examples/read-logs.sh syslog 20 + +# Complete dashboard +./scripts/dashboard.sh + +# List shares +./scripts/unraid-query.sh -q "{ shares { name } }" + +# List containers +./scripts/unraid-query.sh -q "{ docker { containers { names state } } }" +``` + +--- + +## 🔧 Agent Tool Usage Requirements + +**CRITICAL:** When invoking scripts from this skill via the zsh-tool, **ALWAYS use `pty: true`**. + +Without PTY mode, command output will not be visible even though commands execute successfully. 
+ +**Correct invocation pattern:** +```typescript + +./skills/SKILL_NAME/scripts/SCRIPT.sh [args] +true + +``` diff --git a/skills/unraid/examples/disk-health.sh b/skills/unraid/examples/disk-health.sh new file mode 100755 index 0000000..25dbc0d --- /dev/null +++ b/skills/unraid/examples/disk-health.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Check disk health and temperatures +# Quick overview of all disks with temperature warnings + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +QUERY_SCRIPT="$SCRIPT_DIR/../scripts/unraid-query.sh" + +QUERY='{ array { disks { name device temp status isSpinning } } }' + +echo "=== Disk Health Report ===" +echo "" + +RESPONSE=$("$QUERY_SCRIPT" -q "$QUERY" -f raw) + +echo "$RESPONSE" | jq -r '.array.disks[] | "\(.name) (\(.device)): \(.temp)°C - \(.status) - \(if .isSpinning then "Spinning" else "Spun down" end)"' + +echo "" +echo "Temperature warnings:" +echo "$RESPONSE" | jq -r '.array.disks[] | select(.temp > 45) | "⚠️ \(.name): \(.temp)°C (HIGH)"' + +HOTTEST=$(echo "$RESPONSE" | jq -r '[.array.disks[].temp] | max') +echo "" +echo "Hottest disk: ${HOTTEST}°C" diff --git a/skills/unraid/examples/read-logs.sh b/skills/unraid/examples/read-logs.sh new file mode 100755 index 0000000..b7a58be --- /dev/null +++ b/skills/unraid/examples/read-logs.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Read Unraid system logs +# Usage: ./read-logs.sh [log-name] [lines] + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +QUERY_SCRIPT="$SCRIPT_DIR/../scripts/unraid-query.sh" + +LOG_NAME="${1:-syslog}" +LINES="${2:-20}" + +echo "=== Reading $LOG_NAME (last $LINES lines) ===" +echo "" + +QUERY="{ logFile(path: \"$LOG_NAME\", lines: $LINES) { path totalLines startLine content } }" + +RESPONSE=$("$QUERY_SCRIPT" -q "$QUERY" -f raw) + +echo "$RESPONSE" | jq -r '.logFile.content' + +echo "" +echo "---" +echo "Total lines in log: $(echo "$RESPONSE" | jq -r '.logFile.totalLines')" +echo "Showing from line: $(echo "$RESPONSE" | jq -r 
'.logFile.startLine')" diff --git a/skills/unraid/references/api-reference.md b/skills/unraid/references/api-reference.md new file mode 100644 index 0000000..f1d38f3 --- /dev/null +++ b/skills/unraid/references/api-reference.md @@ -0,0 +1,946 @@ +# Unraid API - Complete Reference Guide + +**Tested on:** Unraid 7.2 x86_64 +**Date:** 2026-01-21 +**API Type:** GraphQL +**Base URL:** `https://YOUR-UNRAID-SERVER/graphql` + +--- + +## 📊 Summary + +Out of 46 total GraphQL query endpoints: +- **✅ 27 fully working read-only endpoints** +- **⚠️ 1 works but returns empty** (`plugins`) +- **❌ 3 return null** (`flash`, `parityHistory`, `services`) +- **❓ 15 untested** (mostly write/mutation operations) + +--- + +## Authentication + +All requests require the `x-api-key` header: + +```bash +-H "x-api-key: YOUR_API_KEY_HERE" +``` + +### How to Generate API Key: +1. Log in to Unraid WebGUI +2. Settings → Management Access → API Keys +3. Create API Key with **Viewer** role (read-only) +4. Copy the generated key + +--- + +## 🎯 All 27 Working Read-Only Endpoints + +### 1. System Info & Metrics + +#### **info** - Hardware Specifications +Get CPU, OS, motherboard, and hardware details. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ info { time cpu { model cores threads } os { platform distro release arch } system { manufacturer model version uuid } } }" + }' | jq '.' 
+``` + +**Response:** +```json +{ + "data": { + "info": { + "time": "2026-01-21T12:57:22.539Z", + "cpu": { + "model": "183", + "cores": 16, + "threads": 24 + }, + "os": { + "platform": "linux", + "distro": "Unraid OS", + "release": "7.2 x86_64", + "arch": "x64" + }, + "system": { + "manufacturer": "Micro-Star International Co., Ltd.", + "model": "MS-7E07", + "version": "1.0", + "uuid": "fec05753-077c-8e18-a089-047c1644678a" + } + } + } +} +``` + +--- + +#### **metrics** - Real-Time Usage Stats +Get current CPU and memory usage percentages. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ metrics { cpu { percentTotal } memory { total used free percentTotal swapTotal swapUsed swapFree } } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "metrics": { + "cpu": { + "percentTotal": 20.99 + }, + "memory": { + "total": 134773903360, + "used": 129472622592, + "free": 5301280768, + "percentTotal": 59.97, + "swapTotal": 0, + "swapUsed": 0, + "swapFree": 0 + } + } + } +} +``` + +**Note:** Memory values are in bytes. + +--- + +#### **online** - Server Online Status +Simple boolean check if server is online. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ "query": "{ online }" }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "online": true + } +} +``` + +--- + +#### **isInitialSetup** - Initial Setup Status +Check if server has completed initial setup. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ "query": "{ isInitialSetup }" }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "isInitialSetup": false + } +} +``` + +--- + +### 2. Storage & Disks + +#### **array** - Array Status & Disks +Get array state, disk details, temperatures, and capacity. 
+ +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ array { state disks { id name device size status temp fsSize fsFree fsUsed fsType rotational isSpinning } parityCheckStatus { status progress errors speed } } }" + }' | jq '.' +``` + +**Response (sample):** +```json +{ + "data": { + "array": { + "state": "STARTED", + "disks": [ + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:WDC_WD120EDBZ-11B1HA0_5QGWN5DF", + "name": "disk1", + "device": "sdb", + "size": 11718885324, + "status": "DISK_OK", + "temp": 38, + "fsSize": 11998001574, + "fsFree": 1692508541, + "fsUsed": 10305493033, + "fsType": "xfs", + "rotational": true, + "isSpinning": true + } + ], + "parityCheckStatus": { + "status": "NEVER_RUN", + "progress": 0, + "errors": null, + "speed": "0" + } + } + } +} +``` + +**Note:** Sizes are in kilobytes. Temperature in Celsius. + +--- + +#### **disks** - All Physical Disks +Get ALL disks including array disks, cache SSDs, and boot USB. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ disks { id name } }" + }' | jq '.' +``` + +**Response (sample):** +```json +{ + "data": { + "disks": [ + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:04009732070823130633", + "name": "Cruzer Glide" + }, + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:5QGWN5DF", + "name": "WDC WD120EDBZ-11B1HA0" + }, + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:S6S2NS0TB18572X", + "name": "Samsung SSD 970 EVO Plus 2TB" + } + ] + } +} +``` + +**Returns:** Array disks + Cache SSDs + Boot USB (17 disks in tested system). + +--- + +#### **shares** - Network Shares +List all user shares with comments. 
+ +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ shares { id name comment } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "shares": [ + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:appdata", + "name": "appdata", + "comment": "application data" + }, + { + "id": "3cb1026338736ed07b8afec2c484e429710b0f6550dc65d0c5c410ea9d0fa6b2:backups", + "name": "backups", + "comment": "primary homelab backup target" + } + ] + } +} +``` + +--- + +### 3. Virtualization + +#### **docker** - Docker Containers +List all Docker containers with status and metadata. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ docker { containers { id names image state status created autoStart } } }" + }' | jq '.' +``` + +**Response (when no containers):** +```json +{ + "data": { + "docker": { + "containers": [] + } + } +} +``` + +**Note:** Container logs are NOT accessible via this API. Use `docker logs` via SSH. + +--- + +#### **vms** - Virtual Machines +List all VMs with status and resource allocation. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ vms { id name state cpus memory autostart } }" + }' | jq '.' +``` + +**Response (when no VMs):** +```json +{ + "data": { + "vms": [] + } +} +``` + +--- + +### 4. Logs & Monitoring + +#### **logFiles** - List All Log Files +Get list of all available system log files. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ logFiles { name size modifiedAt } }" + }' | jq '.' 
+``` + +**Response (sample, 32 logs found):** +```json +{ + "data": { + "logFiles": [ + { + "name": "syslog", + "size": 142567, + "modifiedAt": "2026-01-21T13:00:00.000Z" + }, + { + "name": "docker.log", + "size": 66321, + "modifiedAt": "2026-01-05T19:14:53.934Z" + }, + { + "name": "dmesg", + "size": 93128, + "modifiedAt": "2025-12-19T11:09:30.200Z" + } + ] + } +} +``` + +--- + +#### **logFile** - Read Log Content +Read the actual contents of a log file. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "query { logFile(path: \"syslog\", lines: 10) { path totalLines startLine content } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "logFile": { + "path": "/var/log/syslog", + "totalLines": 1395, + "startLine": 1386, + "content": "Jan 21 07:49:49 unraid-server sshd-session[2992319]: Accepted keyboard-interactive/pam for root from 100.80.181.18 port 49724 ssh2\n..." + } + } +} +``` + +**Parameters:** +- `path` - Log file name (required) +- `lines` - Number of lines to return (optional, defaults to last 100) +- `startLine` - Line number to start from (optional) + +**Available logs include:** +- `syslog` - System log +- `docker.log` - Docker daemon log +- `dmesg` - Kernel messages +- `wtmp` - Login records +- And 28 more... + +--- + +#### **notifications** - System Alerts +Get system notifications and alerts. + +**Get notification counts:** +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ notifications { overview { unread { info warning alert total } archive { info warning alert total } } } }" + }' | jq '.' 
+``` + +**Response:** +```json +{ + "data": { + "notifications": { + "overview": { + "unread": { + "info": 66, + "warning": 0, + "alert": 0, + "total": 66 + }, + "archive": { + "info": 581, + "warning": 4, + "alert": 1, + "total": 586 + } + } + } + } +} +``` + +**List unread notifications:** +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ notifications { list(filter: { type: UNREAD, offset: 0, limit: 10 }) { id subject description timestamp } } }" + }' | jq '.' +``` + +**Response (sample):** +```json +{ + "data": { + "notifications": { + "list": [ + { + "id": "...", + "subject": "Backup Notification", + "description": "ZFS replication was successful...", + "timestamp": "2026-01-21T09:10:40.000Z" + } + ] + } + } +} +``` + +**Parameters for list query:** +- `type` - `UNREAD` or `ARCHIVE` (required) +- `offset` - Starting index (required, use 0 for first page) +- `limit` - Number of results (required, max typically 100) +- `importance` - Filter by `INFO`, `WARNING`, or `ALERT` (optional) + +--- + +### 5. UPS & Power + +#### **upsDevices** - UPS Status +Get UPS battery backup status (if configured). + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ upsDevices { id name status charge load runtime } }" + }' | jq '.' +``` + +**Response (when no UPS):** +```json +{ + "data": { + "upsDevices": [] + } +} +``` + +--- + +### 6. User & Authentication + +#### **me** - Current User Info +Get information about the current authenticated user. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ me { id } }" + }' | jq '.' +``` + +--- + +#### **owner** - Server Owner +Get server owner information. 
+ +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ owner { username url avatar } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "owner": { + "username": "root", + "url": "", + "avatar": "" + } + } +} +``` + +--- + +#### **isSSOEnabled** - SSO Status +Check if Single Sign-On is enabled. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ "query": "{ isSSOEnabled }" }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "isSSOEnabled": true + } +} +``` + +--- + +#### **oidcProviders** - OIDC Providers +List configured OpenID Connect providers. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ oidcProviders { id } }" + }' | jq '.' +``` + +--- + +### 7. API Keys & Access + +#### **apiKeys** - List API Keys +Get list of all API keys (requires appropriate permissions). + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ apiKeys { id name createdAt } }" + }' | jq '.' +``` + +**Response (sample, 4 keys found):** +```json +{ + "data": { + "apiKeys": [ + { + "id": "key1", + "name": "monitoring", + "createdAt": "2026-01-01T00:00:00.000Z" + } + ] + } +} +``` + +--- + +### 8. Configuration & Settings + +#### **config** - System Configuration +Get system configuration details. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ config { id } }" + }' | jq '.' +``` + +--- + +#### **settings** - System Settings +Get system settings. 
+ +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ settings { id } }" + }' | jq '.' +``` + +--- + +#### **vars** - System Variables +Get system variables. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ vars { id } }" + }' | jq '.' +``` + +--- + +### 9. Customization & Theming + +#### **customization** - UI Customization +Get UI theme and customization settings. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ customization { theme { name headerBackgroundColor headerPrimaryTextColor showBannerImage showBannerGradient } } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "customization": { + "theme": { + "name": "white", + "headerBackgroundColor": "#2e3440", + "headerPrimaryTextColor": "#FFF", + "showBannerImage": false, + "showBannerGradient": false + } + } + } +} +``` + +--- + +#### **publicTheme** - Public Theme Settings +Get public-facing theme settings. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ publicTheme { name showBannerImage showBannerGradient headerBackgroundColor headerPrimaryTextColor headerSecondaryTextColor } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "publicTheme": { + "name": "white", + "showBannerImage": false, + "showBannerGradient": false, + "headerBackgroundColor": "#2e3440", + "headerPrimaryTextColor": "#FFF", + "headerSecondaryTextColor": "#fff" + } + } +} +``` + +--- + +#### **publicPartnerInfo** - Partner/OEM Branding +Get partner or OEM branding information. 
+ +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ publicPartnerInfo { partnerName partnerUrl partnerLogoUrl hasPartnerLogo } }" + }' | jq '.' +``` + +**Response:** +```json +{ + "data": { + "publicPartnerInfo": { + "partnerName": null, + "partnerUrl": null, + "partnerLogoUrl": "/webGui/images/UN-logotype-gradient.svg", + "hasPartnerLogo": false + } + } +} +``` + +--- + +### 10. Server Management + +#### **registration** - License Info +Get Unraid license/registration information. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ registration { id } }" + }' | jq '.' +``` + +--- + +#### **server** - Server Metadata +Get server metadata. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ server { id } }" + }' | jq '.' +``` + +--- + +#### **servers** - Multi-Server Management +Get list of servers (for multi-server setups). + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ servers { id } }" + }' | jq '.' +``` + +--- + +### 11. Plugins + +#### **plugins** - Installed Plugins +List installed plugins. + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ plugins { name version author description } }" + }' | jq '.' 
+``` + +**Response (when no plugins):** +```json +{ + "data": { + "plugins": [] + } +} +``` + +--- + +## 🎯 Complete Dashboard Query + +Get everything useful in a single query: + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "query Dashboard { info { time cpu { model cores threads } os { distro release } system { manufacturer model } } metrics { cpu { percentTotal } memory { total used free percentTotal } } array { state disks { name device temp status fsSize fsFree fsUsed isSpinning } parityCheckStatus { status progress errors } } shares { name comment } online isSSOEnabled }" + }' | jq '.' +``` + +--- + +## ❌ Endpoints That Return Null + +These queries exist but return `null` in Unraid 7.2: + +1. **`flash`** - Boot USB drive info (returns `null`) +2. **`parityHistory`** - Historical parity checks (returns `null` - use `array.parityCheckStatus` instead) +3. 
**`services`** - System services (returns `null`) + +--- + +## 🔍 Schema Discovery + +### Discover Available Fields for a Type + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ __type(name: \"Info\") { fields { name type { name } } } }" + }' | jq -r '.data.__type.fields[] | "\(.name): \(.type.name)"' +``` + +### List All Available Queries + +```bash +curl -s -X POST "https://YOUR-UNRAID/graphql" \ + -H "Content-Type: application/json" \ + -H "x-api-key: YOUR_API_KEY" \ + -d '{ + "query": "{ __type(name: \"Query\") { fields { name } } }" + }' | jq -r '.data.__type.fields[].name' | sort +``` + +--- + +## 📝 Field Name Reference + +Common differences from online documentation: + +| Online Docs | Actual Unraid 7.2 Field | +|------------|------------------------| +| `uptime` | `time` | +| `cpu.usage` | `metrics.cpu.percentTotal` | +| `memory.usage` | `metrics.memory.percentTotal` | +| `array.status` | `array.state` | +| `disk.temperature` | `disk.temp` | +| `percentUsed` | `percentTotal` | + +--- + +## ⚡ Best Practices + +1. **Use `metrics` for real-time stats** - CPU/memory usage is in `metrics`, not `info` +2. **Use `array.disks` for array disks** - The top-level `disks` query includes ALL disks (USB, SSDs, etc.) +3. **Always check errors** - GraphQL returns errors in `errors` array +4. **Use introspection** - Field names can vary between versions +5. **Sizes are in kilobytes** - Disk sizes and capacities are in KB, not bytes +6. **Temperature is Celsius** - All temperature values are in Celsius +7. **Handle empty arrays** - Many queries return `[]` when no data exists +8. **Use viewer role** - Create API keys with "Viewer" role for read-only access + +--- + +## 🚫 Known Limitations + +1. **No Docker container logs** - Container output logs are NOT accessible via API +2. **Real-time streaming not covered** - The queries in this guide are plain request/response; GraphQL subscriptions over WebSocket exist but are outside the scope of this read-only guide +3. 
**Some queries require higher permissions** - Read-only "Viewer" role may not access all queries +4. **No mutation examples included** - This guide covers read-only queries only + +--- + +## 📚 Additional Resources + +- **Unraid Docs:** https://docs.unraid.net/ +- **GraphQL Spec:** https://graphql.org/ +- **GraphQL Introspection:** Use `__schema` and `__type` queries to explore the API + +--- + +**Last Updated:** 2026-01-21 +**API Version:** Unraid 7.2 GraphQL API +**Total Working Endpoints:** 27 of 46 diff --git a/skills/unraid/references/endpoints.md b/skills/unraid/references/endpoints.md new file mode 100644 index 0000000..7fbdd99 --- /dev/null +++ b/skills/unraid/references/endpoints.md @@ -0,0 +1,49 @@ +# Unraid API Endpoints Reference + +Complete list of available GraphQL read-only endpoints in Unraid 7.2+. + +## System & Metrics (8) +1. **`info`** - Hardware specs (CPU, OS, motherboard) +2. **`metrics`** - Real-time CPU/memory usage +3. **`online`** - Server online status +4. **`isInitialSetup`** - Setup completion status +5. **`config`** - System configuration +6. **`vars`** - System variables +7. **`settings`** - System settings +8. **`logFiles`** - List all log files + +## Storage (4) +9. **`array`** - Array status, disks, parity +10. **`disks`** - All physical disks (array + cache + USB) +11. **`shares`** - Network shares +12. **`logFile`** - Read log content + +## Virtualization (2) +13. **`docker`** - Docker containers +14. **`vms`** - Virtual machines + +## Monitoring (2) +15. **`notifications`** - System alerts +16. **`upsDevices`** - UPS battery status + +## User & Auth (4) +17. **`me`** - Current user info +18. **`owner`** - Server owner +19. **`isSSOEnabled`** - SSO status +20. **`oidcProviders`** - OIDC providers + +## API Management (1) +21. **`apiKeys`** - List API keys + +## Customization (3) +22. **`customization`** - UI theme & settings +23. **`publicTheme`** - Public theme +24. 
**`publicPartnerInfo`** - Partner branding + +## Server Management (3) +25. **`registration`** - License info +26. **`server`** - Server metadata +27. **`servers`** - Multi-server management + +## Bonus (1) +28. **`plugins`** - Installed plugins (returns empty array if none) diff --git a/skills/unraid/references/introspection-schema.md b/skills/unraid/references/introspection-schema.md new file mode 100644 index 0000000..62676a1 --- /dev/null +++ b/skills/unraid/references/introspection-schema.md @@ -0,0 +1,3114 @@ +""" +Indicates exactly one field must be supplied and this field must not be `null`. +""" +directive @oneOf on INPUT_OBJECT + +"""Directive to document required permissions for fields""" +directive @usePermissions( + """The action required for access (must be a valid AuthAction enum value)""" + action: String + + """The resource required for access (must be a valid Resource enum value)""" + resource: String +) on FIELD_DEFINITION + +type ParityCheck { + """Date of the parity check""" + date: DateTime + + """Duration of the parity check in seconds""" + duration: Int + + """Speed of the parity check, in MB/s""" + speed: String + + """Status of the parity check""" + status: ParityCheckStatus! + + """Number of errors during the parity check""" + errors: Int + + """Progress percentage of the parity check""" + progress: Int + + """Whether corrections are being written to parity""" + correcting: Boolean + + """Whether the parity check is paused""" + paused: Boolean + + """Whether the parity check is running""" + running: Boolean +} + +""" +A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format. +""" +scalar DateTime + +enum ParityCheckStatus { + NEVER_RUN + RUNNING + PAUSED + COMPLETED + CANCELLED + FAILED +} + +type Capacity { + """Free capacity""" + free: String! + + """Used capacity""" + used: String! + + """Total capacity""" + total: String! 
+} + +type ArrayCapacity { + """Capacity in kilobytes""" + kilobytes: Capacity! + + """Capacity in number of disks""" + disks: Capacity! +} + +type ArrayDisk implements Node { + id: PrefixedID! + + """ + Array slot number. Parity1 is always 0 and Parity2 is always 29. Array slots will be 1 - 28. Cache slots are 30 - 53. Flash is 54. + """ + idx: Int! + name: String + device: String + + """(KB) Disk Size total""" + size: BigInt + status: ArrayDiskStatus + + """Is the disk a HDD or SSD.""" + rotational: Boolean + + """Disk temp - will be NaN if array is not started or DISK_NP""" + temp: Int + + """ + Count of I/O read requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numReads: BigInt + + """ + Count of I/O writes requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numWrites: BigInt + + """ + Number of unrecoverable errors reported by the device I/O drivers. Missing data due to unrecoverable array read errors is filled in on-the-fly using parity reconstruct (and we attempt to write this data back to the sector(s) which failed). Any unrecoverable write error results in disabling the disk. + """ + numErrors: BigInt + + """(KB) Total Size of the FS (Not present on Parity type drive)""" + fsSize: BigInt + + """(KB) Free Size on the FS (Not present on Parity type drive)""" + fsFree: BigInt + + """(KB) Used Size on the FS (Not present on Parity type drive)""" + fsUsed: BigInt + exportable: Boolean + + """Type of Disk - used to differentiate Cache / Flash / Array / Parity""" + type: ArrayDiskType! 
+ + """(%) Disk space left to warn""" + warning: Int + + """(%) Disk space left for critical""" + critical: Int + + """File system type for the disk""" + fsType: String + + """User comment on disk""" + comment: String + + """File format (ex MBR: 4KiB-aligned)""" + format: String + + """ata | nvme | usb | (others)""" + transport: String + color: ArrayDiskFsColor + + """Whether the disk is currently spinning""" + isSpinning: Boolean +} + +interface Node { + id: PrefixedID! +} + +""" +The `BigInt` scalar type represents non-fractional signed whole numeric values. +""" +scalar BigInt + +enum ArrayDiskStatus { + DISK_NP + DISK_OK + DISK_NP_MISSING + DISK_INVALID + DISK_WRONG + DISK_DSBL + DISK_NP_DSBL + DISK_DSBL_NEW + DISK_NEW +} + +enum ArrayDiskType { + DATA + PARITY + FLASH + CACHE +} + +enum ArrayDiskFsColor { + GREEN_ON + GREEN_BLINK + BLUE_ON + BLUE_BLINK + YELLOW_ON + YELLOW_BLINK + RED_ON + RED_OFF + GREY_OFF +} + +type UnraidArray implements Node { + id: PrefixedID! + + """Current array state""" + state: ArrayState! + + """Current array capacity""" + capacity: ArrayCapacity! + + """Current boot disk""" + boot: ArrayDisk + + """Parity disks in the current array""" + parities: [ArrayDisk!]! + + """Current parity check status""" + parityCheckStatus: ParityCheck! + + """Data disks in the current array""" + disks: [ArrayDisk!]! + + """Caches in the current array""" + caches: [ArrayDisk!]! +} + +enum ArrayState { + STARTED + STOPPED + NEW_ARRAY + RECON_DISK + DISABLE_DISK + SWAP_DSBL + INVALID_EXPANSION + PARITY_NOT_BIGGEST + TOO_MANY_MISSING_DISKS + NEW_DISK_TOO_SMALL + NO_DATA_DISKS +} + +type Share implements Node { + id: PrefixedID! + + """Display name""" + name: String + + """(KB) Free space""" + free: BigInt + + """(KB) Used Size""" + used: BigInt + + """(KB) Total size""" + size: BigInt + + """Disks that are included in this share""" + include: [String!] + + """Disks that are excluded from this share""" + exclude: [String!] 
+ + """Is this share cached""" + cache: Boolean + + """Original name""" + nameOrig: String + + """User comment""" + comment: String + + """Allocator""" + allocator: String + + """Split level""" + splitLevel: String + + """Floor""" + floor: String + + """COW""" + cow: String + + """Color""" + color: String + + """LUKS status""" + luksStatus: String +} + +type DiskPartition { + """The name of the partition""" + name: String! + + """The filesystem type of the partition""" + fsType: DiskFsType! + + """The size of the partition in bytes""" + size: Float! +} + +"""The type of filesystem on the disk partition""" +enum DiskFsType { + XFS + BTRFS + VFAT + ZFS + EXT4 + NTFS +} + +type Disk implements Node { + id: PrefixedID! + + """The device path of the disk (e.g. /dev/sdb)""" + device: String! + + """The type of disk (e.g. SSD, HDD)""" + type: String! + + """The model name of the disk""" + name: String! + + """The manufacturer of the disk""" + vendor: String! + + """The total size of the disk in bytes""" + size: Float! + + """The number of bytes per sector""" + bytesPerSector: Float! + + """The total number of cylinders on the disk""" + totalCylinders: Float! + + """The total number of heads on the disk""" + totalHeads: Float! + + """The total number of sectors on the disk""" + totalSectors: Float! + + """The total number of tracks on the disk""" + totalTracks: Float! + + """The number of tracks per cylinder""" + tracksPerCylinder: Float! + + """The number of sectors per track""" + sectorsPerTrack: Float! + + """The firmware revision of the disk""" + firmwareRevision: String! + + """The serial number of the disk""" + serialNum: String! + + """The interface type of the disk""" + interfaceType: DiskInterfaceType! + + """The SMART status of the disk""" + smartStatus: DiskSmartStatus! + + """The current temperature of the disk in Celsius""" + temperature: Float + + """The partitions on the disk""" + partitions: [DiskPartition!]! 
+ + """Whether the disk is spinning or not""" + isSpinning: Boolean! +} + +"""The type of interface the disk uses to connect to the system""" +enum DiskInterfaceType { + SAS + SATA + USB + PCIE + UNKNOWN +} + +""" +The SMART (Self-Monitoring, Analysis and Reporting Technology) status of the disk +""" +enum DiskSmartStatus { + OK + UNKNOWN +} + +type KeyFile { + location: String + contents: String +} + +type Registration implements Node { + id: PrefixedID! + type: registrationType + keyFile: KeyFile + state: RegistrationState + expiration: String + updateExpiration: String +} + +enum registrationType { + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + INVALID + TRIAL +} + +enum RegistrationState { + TRIAL + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + EEXPIRED + EGUID + EGUID1 + ETRIAL + ENOKEYFILE + ENOKEYFILE1 + ENOKEYFILE2 + ENOFLASH + ENOFLASH1 + ENOFLASH2 + ENOFLASH3 + ENOFLASH4 + ENOFLASH5 + ENOFLASH6 + ENOFLASH7 + EBLACKLISTED + EBLACKLISTED1 + EBLACKLISTED2 + ENOCONN +} + +type Vars implements Node { + id: PrefixedID! 
+ + """Unraid version""" + version: String + maxArraysz: Int + maxCachesz: Int + + """Machine hostname""" + name: String + timeZone: String + comment: String + security: String + workgroup: String + domain: String + domainShort: String + hideDotFiles: Boolean + localMaster: Boolean + enableFruit: String + + """Should a NTP server be used for time sync?""" + useNtp: Boolean + + """NTP Server 1""" + ntpServer1: String + + """NTP Server 2""" + ntpServer2: String + + """NTP Server 3""" + ntpServer3: String + + """NTP Server 4""" + ntpServer4: String + domainLogin: String + sysModel: String + sysArraySlots: Int + sysCacheSlots: Int + sysFlashSlots: Int + useSsl: Boolean + + """Port for the webui via HTTP""" + port: Int + + """Port for the webui via HTTPS""" + portssl: Int + localTld: String + bindMgt: Boolean + + """Should telnet be enabled?""" + useTelnet: Boolean + porttelnet: Int + useSsh: Boolean + portssh: Int + startPage: String + startArray: Boolean + spindownDelay: String + queueDepth: String + spinupGroups: Boolean + defaultFormat: String + defaultFsType: String + shutdownTimeout: Int + luksKeyfile: String + pollAttributes: String + pollAttributesDefault: String + pollAttributesStatus: String + nrRequests: Int + nrRequestsDefault: Int + nrRequestsStatus: String + mdNumStripes: Int + mdNumStripesDefault: Int + mdNumStripesStatus: String + mdSyncWindow: Int + mdSyncWindowDefault: Int + mdSyncWindowStatus: String + mdSyncThresh: Int + mdSyncThreshDefault: Int + mdSyncThreshStatus: String + mdWriteMethod: Int + mdWriteMethodDefault: String + mdWriteMethodStatus: String + shareDisk: String + shareUser: String + shareUserInclude: String + shareUserExclude: String + shareSmbEnabled: Boolean + shareNfsEnabled: Boolean + shareAfpEnabled: Boolean + shareInitialOwner: String + shareInitialGroup: String + shareCacheEnabled: Boolean + shareCacheFloor: String + shareMoverSchedule: String + shareMoverLogging: Boolean + fuseRemember: String + fuseRememberDefault: String + 
fuseRememberStatus: String + fuseDirectio: String + fuseDirectioDefault: String + fuseDirectioStatus: String + shareAvahiEnabled: Boolean + shareAvahiSmbName: String + shareAvahiSmbModel: String + shareAvahiAfpName: String + shareAvahiAfpModel: String + safeMode: Boolean + startMode: String + configValid: Boolean + configError: ConfigErrorState + joinStatus: String + deviceCount: Int + flashGuid: String + flashProduct: String + flashVendor: String + regCheck: String + regFile: String + regGuid: String + regTy: registrationType + regState: RegistrationState + + """Registration owner""" + regTo: String + regTm: String + regTm2: String + regGen: String + sbName: String + sbVersion: String + sbUpdated: String + sbEvents: Int + sbState: String + sbClean: Boolean + sbSynced: Int + sbSyncErrs: Int + sbSynced2: Int + sbSyncExit: String + sbNumDisks: Int + mdColor: String + mdNumDisks: Int + mdNumDisabled: Int + mdNumInvalid: Int + mdNumMissing: Int + mdNumNew: Int + mdNumErased: Int + mdResync: Int + mdResyncCorr: String + mdResyncPos: String + mdResyncDb: String + mdResyncDt: String + mdResyncAction: String + mdResyncSize: Int + mdState: String + mdVersion: String + cacheNumDevices: Int + cacheSbNumDisks: Int + fsState: String + + """Human friendly string of array events happening""" + fsProgress: String + + """ + Percentage from 0 - 100 while upgrading a disk or swapping parity drives + """ + fsCopyPrcnt: Int + fsNumMounted: Int + fsNumUnmountable: Int + fsUnmountableMask: String + + """Total amount of user shares""" + shareCount: Int + + """Total amount shares with SMB enabled""" + shareSmbCount: Int + + """Total amount shares with NFS enabled""" + shareNfsCount: Int + + """Total amount shares with AFP enabled""" + shareAfpCount: Int + shareMoverActive: Boolean + csrfToken: String +} + +"""Possible error states for configuration""" +enum ConfigErrorState { + UNKNOWN_ERROR + INELIGIBLE + INVALID + NO_KEY_SERVER + WITHDRAWN +} + +type ApiConfig { + version: String! 
+ extraOrigins: [String!]! + sandbox: Boolean + ssoSubIds: [String!]! + plugins: [String!]! +} + +type Permission { + resource: Resource! + + """Actions allowed on this resource""" + actions: [AuthAction!]! +} + +"""Available resources for permissions""" +enum Resource { + ACTIVATION_CODE + API_KEY + ARRAY + CLOUD + CONFIG + CONNECT + CONNECT__REMOTE_ACCESS + CUSTOMIZATIONS + DASHBOARD + DISK + DISPLAY + DOCKER + FLASH + INFO + LOGS + ME + NETWORK + NOTIFICATIONS + ONLINE + OS + OWNER + PERMISSION + REGISTRATION + SERVERS + SERVICES + SHARE + VARS + VMS + WELCOME +} + +"""Authentication actions with possession (e.g., create:any, read:own)""" +enum AuthAction { + """Create any resource""" + CREATE_ANY + + """Create own resource""" + CREATE_OWN + + """Read any resource""" + READ_ANY + + """Read own resource""" + READ_OWN + + """Update any resource""" + UPDATE_ANY + + """Update own resource""" + UPDATE_OWN + + """Delete any resource""" + DELETE_ANY + + """Delete own resource""" + DELETE_OWN +} + +type ApiKey implements Node { + id: PrefixedID! + key: String! + name: String! + description: String + roles: [Role!]! + createdAt: String! + permissions: [Permission!]! +} + +"""Available roles for API keys and users""" +enum Role { + """Full administrative access to all resources""" + ADMIN + + """Internal Role for Unraid Connect""" + CONNECT + + """Basic read access to user profile only""" + GUEST + + """Read-only access to all resources""" + VIEWER +} + +type NotificationCounts { + info: Int! + warning: Int! + alert: Int! + total: Int! +} + +type NotificationOverview { + unread: NotificationCounts! + archive: NotificationCounts! +} + +type Notification implements Node { + id: PrefixedID! + + """Also known as 'event'""" + title: String! + subject: String! + description: String! + importance: NotificationImportance! + link: String + type: NotificationType! 
+ + """ISO Timestamp for when the notification occurred""" + timestamp: String + formattedTimestamp: String +} + +enum NotificationImportance { + ALERT + INFO + WARNING +} + +enum NotificationType { + UNREAD + ARCHIVE +} + +type Notifications implements Node { + id: PrefixedID! + + """A cached overview of the notifications in the system & their severity.""" + overview: NotificationOverview! + list(filter: NotificationFilter!): [Notification!]! +} + +input NotificationFilter { + importance: NotificationImportance + type: NotificationType! + offset: Int! + limit: Int! +} + +type SsoSettings implements Node { + id: PrefixedID! + + """List of configured OIDC providers""" + oidcProviders: [OidcProvider!]! +} + +type UnifiedSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the settings""" + dataSchema: JSON! + + """The UI schema for the settings""" + uiSchema: JSON! + + """The current values of the settings""" + values: JSON! +} + +interface FormSchema { + """The data schema for the form""" + dataSchema: JSON! + + """The UI schema for the form""" + uiSchema: JSON! + + """The current values of the form""" + values: JSON! +} + +""" +The `JSON` scalar type represents JSON values as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf). +""" +scalar JSON + +type ApiKeyFormSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the API key form""" + dataSchema: JSON! + + """The UI schema for the API key form""" + uiSchema: JSON! + + """The current values of the API key form""" + values: JSON! +} + +type UpdateSettingsResponse { + """Whether a restart is required for the changes to take effect""" + restartRequired: Boolean! + + """The updated settings values""" + values: JSON! + + """Warning messages about configuration issues found during validation""" + warnings: [String!] +} + +type Settings implements Node { + id: PrefixedID! 
+ + """A view of all settings""" + unified: UnifiedSettings! + + """SSO settings""" + sso: SsoSettings! + + """The API setting values""" + api: ApiConfig! +} + +type RCloneDrive { + """Provider name""" + name: String! + + """Provider options and configuration schema""" + options: JSON! +} + +type RCloneBackupConfigForm { + id: ID! + dataSchema: JSON! + uiSchema: JSON! +} + +type RCloneBackupSettings { + configForm(formOptions: RCloneConfigFormInput): RCloneBackupConfigForm! + drives: [RCloneDrive!]! + remotes: [RCloneRemote!]! +} + +input RCloneConfigFormInput { + providerType: String + showAdvanced: Boolean = false + parameters: JSON +} + +type RCloneRemote { + name: String! + type: String! + parameters: JSON! + + """Complete remote configuration""" + config: JSON! +} + +type ArrayMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Set array state + """ + setState(input: ArrayStateInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Add new disk to array + """ + addDiskToArray(input: ArrayDiskInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Remove existing disk from array. NOTE: The array must be stopped before running this otherwise it'll throw an error. + """ + removeDiskFromArray(input: ArrayDiskInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Mount a disk in the array + """ + mountArrayDisk(id: PrefixedID!): ArrayDisk! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Unmount a disk from the array + """ + unmountArrayDisk(id: PrefixedID!): ArrayDisk! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Clear statistics for a disk in the array + """ + clearArrayDiskStatistics(id: PrefixedID!): Boolean! +} + +input ArrayStateInput { + """Array state""" + desiredState: ArrayStateInputState! +} + +enum ArrayStateInputState { + START + STOP +} + +input ArrayDiskInput { + """Disk ID""" + id: PrefixedID! + + """The slot for the disk""" + slot: Int +} + +type DockerMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **DOCKER** + + #### Description: + + Start a container + """ + start(id: PrefixedID!): DockerContainer! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **DOCKER** + + #### Description: + + Stop a container + """ + stop(id: PrefixedID!): DockerContainer! +} + +type VmMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Start a virtual machine + """ + start(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Stop a virtual machine + """ + stop(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Pause a virtual machine + """ + pause(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Resume a virtual machine + """ + resume(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Force stop a virtual machine + """ + forceStop(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Reboot a virtual machine + """ + reboot(id: PrefixedID!): Boolean! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Reset a virtual machine + """ + reset(id: PrefixedID!): Boolean! +} + +"""API Key related mutations""" +type ApiKeyMutations { + """ + #### Required Permissions: + + - Action: **CREATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Create an API key + """ + create(input: CreateApiKeyInput!): ApiKey! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Add a role to an API key + """ + addRole(input: AddRoleForApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Remove a role from an API key + """ + removeRole(input: RemoveRoleFromApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **API_KEY** + + #### Description: + + Delete one or more API keys + """ + delete(input: DeleteApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Update an API key + """ + update(input: UpdateApiKeyInput!): ApiKey! +} + +input CreateApiKeyInput { + name: String! + description: String + roles: [Role!] + permissions: [AddPermissionInput!] + + """ + This will replace the existing key if one already exists with the same name, otherwise returns the existing key + """ + overwrite: Boolean +} + +input AddPermissionInput { + resource: Resource! + actions: [AuthAction!]! +} + +input AddRoleForApiKeyInput { + apiKeyId: PrefixedID! + role: Role! +} + +input RemoveRoleFromApiKeyInput { + apiKeyId: PrefixedID! + role: Role! +} + +input DeleteApiKeyInput { + ids: [PrefixedID!]! +} + +input UpdateApiKeyInput { + id: PrefixedID! + name: String + description: String + roles: [Role!] + permissions: [AddPermissionInput!] 
+} + +"""Customization related mutations""" +type CustomizationMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CUSTOMIZATIONS** + + #### Description: + + Update the UI theme (writes dynamix.cfg) + """ + setTheme( + """Theme to apply""" + theme: ThemeName! + ): Theme! +} + +"""The theme name""" +enum ThemeName { + azure + black + gray + white +} + +""" +Parity check related mutations, WIP, response types and functionaliy will change +""" +type ParityCheckMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Start a parity check + """ + start(correct: Boolean!): JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Pause a parity check + """ + pause: JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Resume a parity check + """ + resume: JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Cancel a parity check + """ + cancel: JSON! +} + +"""RClone related mutations""" +type RCloneMutations { + """ + #### Required Permissions: + + - Action: **CREATE_ANY** + - Resource: **FLASH** + + #### Description: + + Create a new RClone remote + """ + createRCloneRemote(input: CreateRCloneRemoteInput!): RCloneRemote! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **FLASH** + + #### Description: + + Delete an existing RClone remote + """ + deleteRCloneRemote(input: DeleteRCloneRemoteInput!): Boolean! +} + +input CreateRCloneRemoteInput { + name: String! + type: String! + parameters: JSON! +} + +input DeleteRCloneRemoteInput { + name: String! +} + +type Config implements Node { + id: PrefixedID! 
+ valid: Boolean + error: String +} + +type PublicPartnerInfo { + partnerName: String + + """Indicates if a partner logo exists""" + hasPartnerLogo: Boolean! + partnerUrl: String + + """ + The path to the partner logo image on the flash drive, relative to the activation code file + """ + partnerLogoUrl: String +} + +type ActivationCode { + code: String + partnerName: String + partnerUrl: String + serverName: String + sysModel: String + comment: String + header: String + headermetacolor: String + background: String + showBannerGradient: Boolean + theme: String +} + +type Customization { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ACTIVATION_CODE** + """ + activationCode: ActivationCode + partnerInfo: PublicPartnerInfo + theme: Theme! +} + +type Theme { + """The theme name""" + name: ThemeName! + + """Whether to show the header banner image""" + showBannerImage: Boolean! + + """Whether to show the banner gradient""" + showBannerGradient: Boolean! + + """Whether to show the description in the header""" + showHeaderDescription: Boolean! + + """The background color of the header""" + headerBackgroundColor: String + + """The text color of the header""" + headerPrimaryTextColor: String + + """The secondary text color of the header""" + headerSecondaryTextColor: String +} + +type ExplicitStatusItem { + name: String! + updateStatus: UpdateStatus! +} + +"""Update status of a container.""" +enum UpdateStatus { + UP_TO_DATE + UPDATE_AVAILABLE + REBUILD_READY + UNKNOWN +} + +type ContainerPort { + ip: String + privatePort: Port + publicPort: Port + type: ContainerPortType! +} + +""" +A field whose value is a valid TCP port within the range of 0 to 65535: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_ports +""" +scalar Port + +enum ContainerPortType { + TCP + UDP +} + +type ContainerHostConfig { + networkMode: String! +} + +type DockerContainer implements Node { + id: PrefixedID! + names: [String!]! + image: String! 
+ imageId: String! + command: String! + created: Int! + ports: [ContainerPort!]! + + """Total size of all files in the container (in bytes)""" + sizeRootFs: BigInt + labels: JSON + state: ContainerState! + status: String! + hostConfig: ContainerHostConfig + networkSettings: JSON + mounts: [JSON!] + autoStart: Boolean! +} + +enum ContainerState { + RUNNING + EXITED +} + +type DockerNetwork implements Node { + id: PrefixedID! + name: String! + created: String! + scope: String! + driver: String! + enableIPv6: Boolean! + ipam: JSON! + internal: Boolean! + attachable: Boolean! + ingress: Boolean! + configFrom: JSON! + configOnly: Boolean! + containers: JSON! + options: JSON! + labels: JSON! +} + +type Docker implements Node { + id: PrefixedID! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + containers(skipCache: Boolean! = false): [DockerContainer!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + networks(skipCache: Boolean! = false): [DockerNetwork!]! +} + +type ResolvedOrganizerView { + id: String! + name: String! + root: ResolvedOrganizerEntry! + prefs: JSON +} + +union ResolvedOrganizerEntry = ResolvedOrganizerFolder | OrganizerContainerResource | OrganizerResource + +type ResolvedOrganizerFolder { + id: String! + type: String! + name: String! + children: [ResolvedOrganizerEntry!]! +} + +type OrganizerContainerResource { + id: String! + type: String! + name: String! + meta: DockerContainer +} + +type OrganizerResource { + id: String! + type: String! + name: String! + meta: JSON +} + +type ResolvedOrganizerV1 { + version: Float! + views: [ResolvedOrganizerView!]! +} + +type FlashBackupStatus { + """Status message indicating the outcome of the backup initiation.""" + status: String! + + """Job ID if available, can be used to check job status.""" + jobId: String +} + +type Flash implements Node { + id: PrefixedID! + guid: String! + vendor: String! + product: String! 
+} + +type InfoGpu implements Node { + id: PrefixedID! + + """GPU type/manufacturer""" + type: String! + + """GPU type identifier""" + typeid: String! + + """Whether GPU is blacklisted""" + blacklisted: Boolean! + + """Device class""" + class: String! + + """Product ID""" + productid: String! + + """Vendor name""" + vendorname: String +} + +type InfoNetwork implements Node { + id: PrefixedID! + + """Network interface name""" + iface: String! + + """Network interface model""" + model: String + + """Network vendor""" + vendor: String + + """MAC address""" + mac: String + + """Virtual interface flag""" + virtual: Boolean + + """Network speed""" + speed: String + + """DHCP enabled flag""" + dhcp: Boolean +} + +type InfoPci implements Node { + id: PrefixedID! + + """Device type/manufacturer""" + type: String! + + """Type identifier""" + typeid: String! + + """Vendor name""" + vendorname: String + + """Vendor ID""" + vendorid: String! + + """Product name""" + productname: String + + """Product ID""" + productid: String! + + """Blacklisted status""" + blacklisted: String! + + """Device class""" + class: String! +} + +type InfoUsb implements Node { + id: PrefixedID! + + """USB device name""" + name: String! + + """USB bus number""" + bus: String + + """USB device number""" + device: String +} + +type InfoDevices implements Node { + id: PrefixedID! + + """List of GPU devices""" + gpu: [InfoGpu!] + + """List of network interfaces""" + network: [InfoNetwork!] + + """List of PCI devices""" + pci: [InfoPci!] + + """List of USB devices""" + usb: [InfoUsb!] +} + +type InfoDisplayCase implements Node { + id: PrefixedID! + + """Case image URL""" + url: String! + + """Case icon identifier""" + icon: String! + + """Error message if any""" + error: String! + + """Base64 encoded case image""" + base64: String! +} + +type InfoDisplay implements Node { + id: PrefixedID! + + """Case display configuration""" + case: InfoDisplayCase! + + """UI theme name""" + theme: ThemeName! 
+ + """Temperature unit (C or F)""" + unit: Temperature! + + """Enable UI scaling""" + scale: Boolean! + + """Show tabs in UI""" + tabs: Boolean! + + """Enable UI resize""" + resize: Boolean! + + """Show WWN identifiers""" + wwn: Boolean! + + """Show totals""" + total: Boolean! + + """Show usage statistics""" + usage: Boolean! + + """Show text labels""" + text: Boolean! + + """Warning temperature threshold""" + warning: Int! + + """Critical temperature threshold""" + critical: Int! + + """Hot temperature threshold""" + hot: Int! + + """Maximum temperature threshold""" + max: Int + + """Locale setting""" + locale: String +} + +"""Temperature unit""" +enum Temperature { + CELSIUS + FAHRENHEIT +} + +"""CPU load for a single core""" +type CpuLoad { + """The total CPU load on a single core, in percent.""" + percentTotal: Float! + + """The percentage of time the CPU spent in user space.""" + percentUser: Float! + + """The percentage of time the CPU spent in kernel space.""" + percentSystem: Float! + + """ + The percentage of time the CPU spent on low-priority (niced) user space processes. + """ + percentNice: Float! + + """The percentage of time the CPU was idle.""" + percentIdle: Float! + + """The percentage of time the CPU spent servicing hardware interrupts.""" + percentIrq: Float! + + """The percentage of time the CPU spent running virtual machines (guest).""" + percentGuest: Float! + + """The percentage of CPU time stolen by the hypervisor.""" + percentSteal: Float! +} + +type CpuPackages implements Node { + id: PrefixedID! + + """Total CPU package power draw (W)""" + totalPower: Float! + + """Power draw per package (W)""" + power: [Float!]! + + """Temperature per package (°C)""" + temp: [Float!]! +} + +type CpuUtilization implements Node { + id: PrefixedID! + + """Total CPU load in percent""" + percentTotal: Float! + + """CPU load for each core""" + cpus: [CpuLoad!]! +} + +type InfoCpu implements Node { + id: PrefixedID! 
+ + """CPU manufacturer""" + manufacturer: String + + """CPU brand name""" + brand: String + + """CPU vendor""" + vendor: String + + """CPU family""" + family: String + + """CPU model""" + model: String + + """CPU stepping""" + stepping: Int + + """CPU revision""" + revision: String + + """CPU voltage""" + voltage: String + + """Current CPU speed in GHz""" + speed: Float + + """Minimum CPU speed in GHz""" + speedmin: Float + + """Maximum CPU speed in GHz""" + speedmax: Float + + """Number of CPU threads""" + threads: Int + + """Number of CPU cores""" + cores: Int + + """Number of physical processors""" + processors: Int + + """CPU socket type""" + socket: String + + """CPU cache information""" + cache: JSON + + """CPU feature flags""" + flags: [String!] + + """ + Per-package array of core/thread pairs, e.g. [[[0,1],[2,3]], [[4,5],[6,7]]] + """ + topology: [[[Int!]!]!]! + packages: CpuPackages! +} + +type MemoryLayout implements Node { + id: PrefixedID! + + """Memory module size in bytes""" + size: BigInt! + + """Memory bank location (e.g., BANK 0)""" + bank: String + + """Memory type (e.g., DDR4, DDR5)""" + type: String + + """Memory clock speed in MHz""" + clockSpeed: Int + + """Part number of the memory module""" + partNum: String + + """Serial number of the memory module""" + serialNum: String + + """Memory manufacturer""" + manufacturer: String + + """Form factor (e.g., DIMM, SODIMM)""" + formFactor: String + + """Configured voltage in millivolts""" + voltageConfigured: Int + + """Minimum voltage in millivolts""" + voltageMin: Int + + """Maximum voltage in millivolts""" + voltageMax: Int +} + +type MemoryUtilization implements Node { + id: PrefixedID! + + """Total system memory in bytes""" + total: BigInt! + + """Used memory in bytes""" + used: BigInt! + + """Free memory in bytes""" + free: BigInt! + + """Available memory in bytes""" + available: BigInt! + + """Active memory in bytes""" + active: BigInt! 
+ + """Buffer/cache memory in bytes""" + buffcache: BigInt! + + """Memory usage percentage""" + percentTotal: Float! + + """Total swap memory in bytes""" + swapTotal: BigInt! + + """Used swap memory in bytes""" + swapUsed: BigInt! + + """Free swap memory in bytes""" + swapFree: BigInt! + + """Swap usage percentage""" + percentSwapTotal: Float! +} + +type InfoMemory implements Node { + id: PrefixedID! + + """Physical memory layout""" + layout: [MemoryLayout!]! +} + +type InfoOs implements Node { + id: PrefixedID! + + """Operating system platform""" + platform: String + + """Linux distribution name""" + distro: String + + """OS release version""" + release: String + + """OS codename""" + codename: String + + """Kernel version""" + kernel: String + + """OS architecture""" + arch: String + + """Hostname""" + hostname: String + + """Fully qualified domain name""" + fqdn: String + + """OS build identifier""" + build: String + + """Service pack version""" + servicepack: String + + """Boot time ISO string""" + uptime: String + + """OS logo name""" + logofile: String + + """OS serial number""" + serial: String + + """OS started via UEFI""" + uefi: Boolean +} + +type InfoSystem implements Node { + id: PrefixedID! + + """System manufacturer""" + manufacturer: String + + """System model""" + model: String + + """System version""" + version: String + + """System serial number""" + serial: String + + """System UUID""" + uuid: String + + """System SKU""" + sku: String + + """Virtual machine flag""" + virtual: Boolean +} + +type InfoBaseboard implements Node { + id: PrefixedID! 
+ + """Motherboard manufacturer""" + manufacturer: String + + """Motherboard model""" + model: String + + """Motherboard version""" + version: String + + """Motherboard serial number""" + serial: String + + """Motherboard asset tag""" + assetTag: String + + """Maximum memory capacity in bytes""" + memMax: Float + + """Number of memory slots""" + memSlots: Float +} + +type CoreVersions { + """Unraid version""" + unraid: String + + """Unraid API version""" + api: String + + """Kernel version""" + kernel: String +} + +type PackageVersions { + """OpenSSL version""" + openssl: String + + """Node.js version""" + node: String + + """npm version""" + npm: String + + """pm2 version""" + pm2: String + + """Git version""" + git: String + + """nginx version""" + nginx: String + + """PHP version""" + php: String + + """Docker version""" + docker: String +} + +type InfoVersions implements Node { + id: PrefixedID! + + """Core system versions""" + core: CoreVersions! + + """Software package versions""" + packages: PackageVersions +} + +type Info implements Node { + id: PrefixedID! + + """Current server time""" + time: DateTime! + + """Motherboard information""" + baseboard: InfoBaseboard! + + """CPU information""" + cpu: InfoCpu! + + """Device information""" + devices: InfoDevices! + + """Display configuration""" + display: InfoDisplay! + + """Machine ID""" + machineId: ID + + """Memory information""" + memory: InfoMemory! + + """Operating system information""" + os: InfoOs! + + """System information""" + system: InfoSystem! + + """Software versions""" + versions: InfoVersions! +} + +type LogFile { + """Name of the log file""" + name: String! + + """Full path to the log file""" + path: String! + + """Size of the log file in bytes""" + size: Int! + + """Last modified timestamp""" + modifiedAt: DateTime! +} + +type LogFileContent { + """Path to the log file""" + path: String! + + """Content of the log file""" + content: String! 
+ + """Total number of lines in the file""" + totalLines: Int! + + """Starting line number of the content (1-indexed)""" + startLine: Int +} + +"""System metrics including CPU and memory utilization""" +type Metrics implements Node { + id: PrefixedID! + + """Current CPU utilization metrics""" + cpu: CpuUtilization + + """Current memory utilization metrics""" + memory: MemoryUtilization +} + +type Owner { + username: String! + url: String! + avatar: String! +} + +type ProfileModel implements Node { + id: PrefixedID! + username: String! + url: String! + avatar: String! +} + +type Server implements Node { + id: PrefixedID! + owner: ProfileModel! + guid: String! + apikey: String! + name: String! + + """Whether this server is online or offline""" + status: ServerStatus! + wanip: String! + lanip: String! + localurl: String! + remoteurl: String! +} + +enum ServerStatus { + ONLINE + OFFLINE + NEVER_CONNECTED +} + +type OidcAuthorizationRule { + """The claim to check (e.g., email, sub, groups, hd)""" + claim: String! + + """The comparison operator""" + operator: AuthorizationOperator! + + """The value(s) to match against""" + value: [String!]! +} + +"""Operators for authorization rule matching""" +enum AuthorizationOperator { + EQUALS + CONTAINS + ENDS_WITH + STARTS_WITH +} + +type OidcProvider { + """The unique identifier for the OIDC provider""" + id: PrefixedID! + + """Display name of the OIDC provider""" + name: String! + + """OAuth2 client ID registered with the provider""" + clientId: String! + + """OAuth2 client secret (if required by provider)""" + clientSecret: String + + """ + OIDC issuer URL (e.g., https://accounts.google.com). Required for auto-discovery via /.well-known/openid-configuration + """ + issuer: String + + """ + OAuth2 authorization endpoint URL. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + authorizationEndpoint: String + + """ + OAuth2 token endpoint URL. 
If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + tokenEndpoint: String + + """ + JSON Web Key Set URI for token validation. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + jwksUri: String + + """OAuth2 scopes to request (e.g., openid, profile, email)""" + scopes: [String!]! + + """Flexible authorization rules based on claims""" + authorizationRules: [OidcAuthorizationRule!] + + """ + Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass). Defaults to OR. + """ + authorizationRuleMode: AuthorizationRuleMode + + """Custom text for the login button""" + buttonText: String + + """URL or base64 encoded icon for the login button""" + buttonIcon: String + + """ + Button variant style from Reka UI. See https://reka-ui.com/docs/components/button + """ + buttonVariant: String + + """ + Custom CSS styles for the button (e.g., "background: linear-gradient(to right, #4f46e5, #7c3aed); border-radius: 9999px;") + """ + buttonStyle: String +} + +""" +Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass) +""" +enum AuthorizationRuleMode { + OR + AND +} + +type OidcConfiguration { + """List of configured OIDC providers""" + providers: [OidcProvider!]! + + """ + Default allowed redirect origins that apply to all OIDC providers (e.g., Tailscale domains) + """ + defaultAllowedOrigins: [String!] +} + +type OidcSessionValidation { + valid: Boolean! + username: String +} + +type PublicOidcProvider { + id: ID! + name: String! + buttonText: String + buttonIcon: String + buttonVariant: String + buttonStyle: String +} + +type UPSBattery { + """ + Battery charge level as a percentage (0-100). Unit: percent (%). Example: 100 means battery is fully charged + """ + chargeLevel: Int! + + """ + Estimated runtime remaining on battery power. Unit: seconds. Example: 3600 means 1 hour of runtime remaining + """ + estimatedRuntime: Int! 
+ + """ + Battery health status. Possible values: 'Good', 'Replace', 'Unknown'. Indicates if the battery needs replacement + """ + health: String! +} + +type UPSPower { + """ + Input voltage from the wall outlet/mains power. Unit: volts (V). Example: 120.5 for typical US household voltage + """ + inputVoltage: Float! + + """ + Output voltage being delivered to connected devices. Unit: volts (V). Example: 120.5 - should match input voltage when on mains power + """ + outputVoltage: Float! + + """ + Current load on the UPS as a percentage of its capacity. Unit: percent (%). Example: 25 means UPS is loaded at 25% of its maximum capacity + """ + loadPercentage: Int! +} + +type UPSDevice { + """ + Unique identifier for the UPS device. Usually based on the model name or a generated ID + """ + id: ID! + + """Display name for the UPS device. Can be customized by the user""" + name: String! + + """UPS model name/number. Example: 'APC Back-UPS Pro 1500'""" + model: String! + + """ + Current operational status of the UPS. Common values: 'Online', 'On Battery', 'Low Battery', 'Replace Battery', 'Overload', 'Offline'. 'Online' means running on mains power, 'On Battery' means running on battery backup + """ + status: String! + + """Battery-related information""" + battery: UPSBattery! + + """Power-related information""" + power: UPSPower! +} + +type UPSConfiguration { + """ + UPS service state. Values: 'enable' or 'disable'. Controls whether the UPS monitoring service is running + """ + service: String + + """ + Type of cable connecting the UPS to the server. Common values: 'usb', 'smart', 'ether', 'custom'. Determines communication protocol + """ + upsCable: String + + """ + Custom cable configuration string. Only used when upsCable is set to 'custom'. Format depends on specific UPS model + """ + customUpsCable: String + + """ + UPS communication type. Common values: 'usb', 'net', 'snmp', 'dumb', 'pcnet', 'modbus'. 
Defines how the server communicates with the UPS + """ + upsType: String + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network. Depends on upsType setting + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: volt-amperes (VA). Example: 1500 for a 1500VA UPS. Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level threshold for shutdown. Unit: percent (%). Example: 10 means shutdown when battery reaches 10%. System will shutdown when battery drops to this level + """ + batteryLevel: Int + + """ + Runtime threshold for shutdown. Unit: minutes. Example: 5 means shutdown when 5 minutes runtime remaining. System will shutdown when estimated runtime drops below this + """ + minutes: Int + + """ + Timeout for UPS communications. Unit: seconds. Example: 0 means no timeout. Time to wait for UPS response before considering it offline + """ + timeout: Int + + """ + Kill UPS power after shutdown. Values: 'yes' or 'no'. If 'yes', tells UPS to cut power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: String + + """ + Network Information Server (NIS) IP address. Default: '0.0.0.0' (listen on all interfaces). IP address for apcupsd network information server + """ + nisIp: String + + """ + Network server mode. Values: 'on' or 'off'. Enable to allow network clients to monitor this UPS + """ + netServer: String + + """ + UPS name for network monitoring. Used to identify this UPS on the network. Example: 'SERVER_UPS' + """ + upsName: String + + """ + Override UPS model name. Used for display purposes. Leave unset to use UPS-reported model + """ + modelName: String +} + +type VmDomain implements Node { + """The unique identifier for the vm (uuid)""" + id: PrefixedID! + + """A friendly name for the vm""" + name: String + + """Current domain vm state""" + state: VmState! 
+ + """The UUID of the vm""" + uuid: String @deprecated(reason: "Use id instead") +} + +"""The state of a virtual machine""" +enum VmState { + NOSTATE + RUNNING + IDLE + PAUSED + SHUTDOWN + SHUTOFF + CRASHED + PMSUSPENDED +} + +type Vms implements Node { + id: PrefixedID! + domains: [VmDomain!] + domain: [VmDomain!] +} + +type Uptime { + timestamp: String +} + +type Service implements Node { + id: PrefixedID! + name: String + online: Boolean + uptime: Uptime + version: String +} + +type UserAccount implements Node { + id: PrefixedID! + + """The name of the user""" + name: String! + + """A description of the user""" + description: String! + + """The roles of the user""" + roles: [Role!]! + + """The permissions of the user""" + permissions: [Permission!] +} + +type Plugin { + """The name of the plugin package""" + name: String! + + """The version of the plugin package""" + version: String! + + """Whether the plugin has an API module""" + hasApiModule: Boolean + + """Whether the plugin has a CLI module""" + hasCliModule: Boolean +} + +""" +### Description: + +ID scalar type that prefixes the underlying ID with the server identifier on output and strips it on input. + +We use this scalar type to ensure that the ID is unique across all servers, allowing the same underlying resource ID to be used across different server instances. + +#### Input Behavior: + +When providing an ID as input (e.g., in arguments or input objects), the server identifier prefix (':') is optional. + +- If the prefix is present (e.g., '123:456'), it will be automatically stripped, and only the underlying ID ('456') will be used internally. +- If the prefix is absent (e.g., '456'), the ID will be used as-is. + +This makes it flexible for clients, as they don't strictly need to know or provide the server ID. + +#### Output Behavior: + +When an ID is returned in the response (output), it will *always* be prefixed with the current server's unique identifier (e.g., '123:456'). 
+ +#### Example: + +Note: The server identifier is '123' in this example. + +##### Input (Prefix Optional): +```graphql +# Both of these are valid inputs resolving to internal ID '456' +{ + someQuery(id: "123:456") { ... } + anotherQuery(id: "456") { ... } +} +``` + +##### Output (Prefix Always Added): +```graphql +# Assuming internal ID is '456' +{ + "data": { + "someResource": { + "id": "123:456" + } + } +} +``` +""" +scalar PrefixedID + +type Query { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + """ + apiKeys: [ApiKey!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + """ + apiKey(id: PrefixedID!): ApiKey + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + All possible roles for API keys + """ + apiKeyPossibleRoles: [Role!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + All possible permissions for API keys + """ + apiKeyPossiblePermissions: [Permission!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + Get the actual permissions that would be granted by a set of roles + """ + getPermissionsForRoles(roles: [Role!]!): [Permission!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + Preview the effective permissions for a combination of roles and explicit permissions + """ + previewEffectivePermissions(roles: [Role!], permissions: [AddPermissionInput!]): [Permission!]! + + """Get all available authentication actions with possession""" + getAvailableAuthActions: [AuthAction!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + + #### Description: + + Get JSON Schema for API key creation form + """ + getApiKeyCreationFormSchema: ApiKeyFormSettings! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + """ + config: Config! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **FLASH** + """ + flash: Flash! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ME** + """ + me: UserAccount! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + + #### Description: + + Get all notifications + """ + notifications: Notifications! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ONLINE** + """ + online: Boolean! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **OWNER** + """ + owner: Owner! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **REGISTRATION** + """ + registration: Registration + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + server: Server + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + servers: [Server!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVICES** + """ + services: [Service!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SHARE** + """ + shares: [Share!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **VARS** + """ + vars: Vars! + isInitialSetup: Boolean! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **VMS** + + #### Description: + + Get information about all VMs on the system + """ + vms: Vms! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + parityHistory: [ParityCheck!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + array: UnraidArray! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CUSTOMIZATIONS** + """ + customization: Customization + publicPartnerInfo: PublicPartnerInfo + publicTheme: Theme! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + docker: Docker! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DISK** + """ + disks: [Disk!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DISK** + """ + disk(id: PrefixedID!): Disk! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **FLASH** + """ + rclone: RCloneBackupSettings! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + info: Info! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFiles: [LogFile!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFile(path: String!, lines: Int, startLine: Int): LogFileContent! + settings: Settings! + isSSOEnabled: Boolean! + + """Get public OIDC provider information for login buttons""" + publicOidcProviders: [PublicOidcProvider!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get all configured OIDC providers (admin only) + """ + oidcProviders: [OidcProvider!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get a specific OIDC provider by ID + """ + oidcProvider(id: PrefixedID!): OidcProvider + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get the full OIDC configuration (admin only) + """ + oidcConfiguration: OidcConfiguration! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Validate an OIDC session token (internal use for CLI validation) + """ + validateOidcSession(token: String!): OidcSessionValidation! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + metrics: Metrics! + upsDevices: [UPSDevice!]! + upsDeviceById(id: String!): UPSDevice + upsConfiguration: UPSConfiguration! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + List all installed plugins with their metadata + """ + plugins: [Plugin!]! +} + +type Mutation { + """Creates a new notification record""" + createNotification(input: NotificationData!): Notification! + deleteNotification(id: PrefixedID!, type: NotificationType!): NotificationOverview! + + """Deletes all archived notifications on server.""" + deleteArchivedNotifications: NotificationOverview! + + """Marks a notification as archived.""" + archiveNotification(id: PrefixedID!): Notification! + archiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + archiveAll(importance: NotificationImportance): NotificationOverview! + + """Marks a notification as unread.""" + unreadNotification(id: PrefixedID!): Notification! + unarchiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + unarchiveAll(importance: NotificationImportance): NotificationOverview! + + """Reads each notification to recompute & update the overview.""" + recalculateOverview: NotificationOverview! + array: ArrayMutations! + docker: DockerMutations! + vm: VmMutations! + parityCheck: ParityCheckMutations! + apiKey: ApiKeyMutations! + customization: CustomizationMutations! + rclone: RCloneMutations! + + """Initiates a flash drive backup using a configured remote.""" + initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CONFIG** + """ + updateSettings(input: JSON!): UpdateSettingsResponse! + configureUps(config: UPSConfigInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CONFIG** + + #### Description: + + Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + addPlugin(input: PluginManagementInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **CONFIG** + + #### Description: + + Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + removePlugin(input: PluginManagementInput!): Boolean! +} + +input NotificationData { + title: String! + subject: String! + description: String! + importance: NotificationImportance! + link: String +} + +input InitiateFlashBackupInput { + """The name of the remote configuration to use for the backup.""" + remoteName: String! + + """Source path to backup (typically the flash drive).""" + sourcePath: String! + + """Destination path on the remote.""" + destinationPath: String! + + """ + Additional options for the backup operation, such as --dry-run or --transfers. + """ + options: JSON +} + +input UPSConfigInput { + """Enable or disable the UPS monitoring service""" + service: UPSServiceState + + """Type of cable connecting the UPS to the server""" + upsCable: UPSCableType + + """ + Custom cable configuration (only used when upsCable is CUSTOM). Format depends on specific UPS model + """ + customUpsCable: String + + """UPS communication protocol""" + upsType: UPSType + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: watts (W). 
Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level percentage to initiate shutdown. Unit: percent (%) - Valid range: 0-100 + """ + batteryLevel: Int + + """Runtime left in minutes to initiate shutdown. Unit: minutes""" + minutes: Int + + """ + Time on battery before shutdown. Unit: seconds. Set to 0 to disable timeout-based shutdown + """ + timeout: Int + + """ + Turn off UPS power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: UPSKillPower +} + +"""Service state for UPS daemon""" +enum UPSServiceState { + ENABLE + DISABLE +} + +"""UPS cable connection types""" +enum UPSCableType { + USB + SIMPLE + SMART + ETHER + CUSTOM +} + +"""UPS communication protocols""" +enum UPSType { + USB + APCSMART + NET + SNMP + DUMB + PCNET + MODBUS +} + +"""Kill UPS power after shutdown option""" +enum UPSKillPower { + YES + NO +} + +input PluginManagementInput { + """Array of plugin package names to add or remove""" + names: [String!]! + + """ + Whether to treat plugins as bundled plugins. Bundled plugins are installed to node_modules at build time and controlled via config only. + """ + bundled: Boolean! = false + + """ + Whether to restart the API after the operation. When false, a restart has already been queued. + """ + restart: Boolean! = true +} + +type Subscription { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + """ + notificationAdded: Notification! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + """ + notificationsOverview: NotificationOverview! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **OWNER** + """ + ownerSubscription: Owner! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + serversSubscription: Server! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + parityHistorySubscription: ParityCheck! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + arraySubscription: UnraidArray! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFile(path: String!): LogFileContent! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsCpu: CpuUtilization! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsCpuTelemetry: CpuPackages! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsMemory: MemoryUtilization! + upsUpdates: UPSDevice! +} diff --git a/skills/unraid/references/quick-reference.md b/skills/unraid/references/quick-reference.md new file mode 100644 index 0000000..fa960b8 --- /dev/null +++ b/skills/unraid/references/quick-reference.md @@ -0,0 +1,219 @@ +# Unraid API Quick Reference + +Quick reference for the most common Unraid GraphQL API queries. 
+
+## Setup
+
+```bash
+# Set environment variables
+export UNRAID_URL="https://your-unraid-server/graphql"
+export UNRAID_API_KEY="your-api-key-here"
+
+# Or use the helper script directly
+./scripts/unraid-query.sh -u "$UNRAID_URL" -k "$API_KEY" -q "{ online }"
+```
+
+## Common Queries
+
+### System Status
+```graphql
+{
+  online
+  metrics {
+    cpu { percentTotal }
+    memory { total used free percentTotal }
+  }
+}
+```
+
+### Array Status
+```graphql
+{
+  array {
+    state
+    parityCheckStatus { status progress errors }
+  }
+}
+```
+
+### Disk List with Temperatures
+```graphql
+{
+  array {
+    disks {
+      name
+      device
+      temp
+      status
+      fsSize
+      fsFree
+      isSpinning
+    }
+  }
+}
+```
+
+### All Physical Disks (including USB/SSDs)
+```graphql
+{
+  disks {
+    id
+    name
+  }
+}
+```
+
+### Network Shares
+```graphql
+{
+  shares {
+    name
+    comment
+  }
+}
+```
+
+### Docker Containers
+```graphql
+{
+  docker {
+    containers {
+      id
+      names
+      image
+      state
+      status
+    }
+  }
+}
+```
+
+### Virtual Machines
+```graphql
+{
+  vms {
+    domains {
+      id
+      name
+      state
+    }
+  }
+}
+```
+
+### List Log Files
+```graphql
+{
+  logFiles {
+    name
+    size
+    modifiedAt
+  }
+}
+```
+
+### Read Log Content
+```graphql
+{
+  logFile(path: "syslog", lines: 20) {
+    content
+    totalLines
+  }
+}
+```
+
+### System Info
+```graphql
+{
+  info {
+    time
+    cpu { model cores threads }
+    os { distro release }
+    system { manufacturer model }
+  }
+}
+```
+
+### UPS Devices
+```graphql
+{
+  upsDevices {
+    id
+    name
+    status
+    charge
+    load
+  }
+}
+```
+
+### Notifications
+
+**Counts:**
+```graphql
+{
+  notifications {
+    overview {
+      unread { info warning alert total }
+      archive { info warning alert total }
+    }
+  }
+}
+```
+
+**List Unread:**
+```graphql
+{
+  notifications {
+    list(filter: { type: UNREAD, offset: 0, limit: 10 }) {
+      id
+      subject
+      description
+      timestamp
+    }
+  }
+}
+```
+
+**List Archived:**
+```graphql
+{
+  notifications {
+    list(filter: { type: ARCHIVE, offset: 0, limit: 10 }) {
+      id
+      subject
+      description
+      timestamp
+    }
+  }
+}
+```
+
+## Field Name Notes
+
+- Use `metrics` for real-time usage (CPU/memory percentages)
+- Use `info` for hardware specs (cores, model, etc.)
+- Temperature field is `temp` (not `temperature`)
+- Status field is `state` for array (not `status`)
+- Sizes are in kilobytes
+- Temperatures are in Celsius
+
+## Response Structure
+
+All responses follow this pattern:
+```json
+{
+  "data": {
+    "queryName": { ... }
+  }
+}
+```
+
+Errors appear in:
+```json
+{
+  "errors": [
+    { "message": "..." }
+  ]
+}
+```
diff --git a/skills/unraid/references/schema.graphql b/skills/unraid/references/schema.graphql
new file mode 100644
index 0000000..62676a1
--- /dev/null
+++ b/skills/unraid/references/schema.graphql
@@ -0,0 +1,3114 @@
+"""
+Indicates exactly one field must be supplied and this field must not be `null`.
+"""
+directive @oneOf on INPUT_OBJECT
+
+"""Directive to document required permissions for fields"""
+directive @usePermissions(
+  """The action required for access (must be a valid AuthAction enum value)"""
+  action: String
+
+  """The resource required for access (must be a valid Resource enum value)"""
+  resource: String
+) on FIELD_DEFINITION
+
+type ParityCheck {
+  """Date of the parity check"""
+  date: DateTime
+
+  """Duration of the parity check in seconds"""
+  duration: Int
+
+  """Speed of the parity check, in MB/s"""
+  speed: String
+
+  """Status of the parity check"""
+  status: ParityCheckStatus!
+
+  """Number of errors during the parity check"""
+  errors: Int
+
+  """Progress percentage of the parity check"""
+  progress: Int
+
+  """Whether corrections are being written to parity"""
+  correcting: Boolean
+
+  """Whether the parity check is paused"""
+  paused: Boolean
+
+  """Whether the parity check is running"""
+  running: Boolean
+}
+
+"""
+A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format.
+""" +scalar DateTime + +enum ParityCheckStatus { + NEVER_RUN + RUNNING + PAUSED + COMPLETED + CANCELLED + FAILED +} + +type Capacity { + """Free capacity""" + free: String! + + """Used capacity""" + used: String! + + """Total capacity""" + total: String! +} + +type ArrayCapacity { + """Capacity in kilobytes""" + kilobytes: Capacity! + + """Capacity in number of disks""" + disks: Capacity! +} + +type ArrayDisk implements Node { + id: PrefixedID! + + """ + Array slot number. Parity1 is always 0 and Parity2 is always 29. Array slots will be 1 - 28. Cache slots are 30 - 53. Flash is 54. + """ + idx: Int! + name: String + device: String + + """(KB) Disk Size total""" + size: BigInt + status: ArrayDiskStatus + + """Is the disk a HDD or SSD.""" + rotational: Boolean + + """Disk temp - will be NaN if array is not started or DISK_NP""" + temp: Int + + """ + Count of I/O read requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numReads: BigInt + + """ + Count of I/O writes requests sent to the device I/O drivers. These statistics may be cleared at any time. + """ + numWrites: BigInt + + """ + Number of unrecoverable errors reported by the device I/O drivers. Missing data due to unrecoverable array read errors is filled in on-the-fly using parity reconstruct (and we attempt to write this data back to the sector(s) which failed). Any unrecoverable write error results in disabling the disk. + """ + numErrors: BigInt + + """(KB) Total Size of the FS (Not present on Parity type drive)""" + fsSize: BigInt + + """(KB) Free Size on the FS (Not present on Parity type drive)""" + fsFree: BigInt + + """(KB) Used Size on the FS (Not present on Parity type drive)""" + fsUsed: BigInt + exportable: Boolean + + """Type of Disk - used to differentiate Cache / Flash / Array / Parity""" + type: ArrayDiskType! 
+ + """(%) Disk space left to warn""" + warning: Int + + """(%) Disk space left for critical""" + critical: Int + + """File system type for the disk""" + fsType: String + + """User comment on disk""" + comment: String + + """File format (ex MBR: 4KiB-aligned)""" + format: String + + """ata | nvme | usb | (others)""" + transport: String + color: ArrayDiskFsColor + + """Whether the disk is currently spinning""" + isSpinning: Boolean +} + +interface Node { + id: PrefixedID! +} + +""" +The `BigInt` scalar type represents non-fractional signed whole numeric values. +""" +scalar BigInt + +enum ArrayDiskStatus { + DISK_NP + DISK_OK + DISK_NP_MISSING + DISK_INVALID + DISK_WRONG + DISK_DSBL + DISK_NP_DSBL + DISK_DSBL_NEW + DISK_NEW +} + +enum ArrayDiskType { + DATA + PARITY + FLASH + CACHE +} + +enum ArrayDiskFsColor { + GREEN_ON + GREEN_BLINK + BLUE_ON + BLUE_BLINK + YELLOW_ON + YELLOW_BLINK + RED_ON + RED_OFF + GREY_OFF +} + +type UnraidArray implements Node { + id: PrefixedID! + + """Current array state""" + state: ArrayState! + + """Current array capacity""" + capacity: ArrayCapacity! + + """Current boot disk""" + boot: ArrayDisk + + """Parity disks in the current array""" + parities: [ArrayDisk!]! + + """Current parity check status""" + parityCheckStatus: ParityCheck! + + """Data disks in the current array""" + disks: [ArrayDisk!]! + + """Caches in the current array""" + caches: [ArrayDisk!]! +} + +enum ArrayState { + STARTED + STOPPED + NEW_ARRAY + RECON_DISK + DISABLE_DISK + SWAP_DSBL + INVALID_EXPANSION + PARITY_NOT_BIGGEST + TOO_MANY_MISSING_DISKS + NEW_DISK_TOO_SMALL + NO_DATA_DISKS +} + +type Share implements Node { + id: PrefixedID! + + """Display name""" + name: String + + """(KB) Free space""" + free: BigInt + + """(KB) Used Size""" + used: BigInt + + """(KB) Total size""" + size: BigInt + + """Disks that are included in this share""" + include: [String!] + + """Disks that are excluded from this share""" + exclude: [String!] 
+ + """Is this share cached""" + cache: Boolean + + """Original name""" + nameOrig: String + + """User comment""" + comment: String + + """Allocator""" + allocator: String + + """Split level""" + splitLevel: String + + """Floor""" + floor: String + + """COW""" + cow: String + + """Color""" + color: String + + """LUKS status""" + luksStatus: String +} + +type DiskPartition { + """The name of the partition""" + name: String! + + """The filesystem type of the partition""" + fsType: DiskFsType! + + """The size of the partition in bytes""" + size: Float! +} + +"""The type of filesystem on the disk partition""" +enum DiskFsType { + XFS + BTRFS + VFAT + ZFS + EXT4 + NTFS +} + +type Disk implements Node { + id: PrefixedID! + + """The device path of the disk (e.g. /dev/sdb)""" + device: String! + + """The type of disk (e.g. SSD, HDD)""" + type: String! + + """The model name of the disk""" + name: String! + + """The manufacturer of the disk""" + vendor: String! + + """The total size of the disk in bytes""" + size: Float! + + """The number of bytes per sector""" + bytesPerSector: Float! + + """The total number of cylinders on the disk""" + totalCylinders: Float! + + """The total number of heads on the disk""" + totalHeads: Float! + + """The total number of sectors on the disk""" + totalSectors: Float! + + """The total number of tracks on the disk""" + totalTracks: Float! + + """The number of tracks per cylinder""" + tracksPerCylinder: Float! + + """The number of sectors per track""" + sectorsPerTrack: Float! + + """The firmware revision of the disk""" + firmwareRevision: String! + + """The serial number of the disk""" + serialNum: String! + + """The interface type of the disk""" + interfaceType: DiskInterfaceType! + + """The SMART status of the disk""" + smartStatus: DiskSmartStatus! + + """The current temperature of the disk in Celsius""" + temperature: Float + + """The partitions on the disk""" + partitions: [DiskPartition!]! 
+ + """Whether the disk is spinning or not""" + isSpinning: Boolean! +} + +"""The type of interface the disk uses to connect to the system""" +enum DiskInterfaceType { + SAS + SATA + USB + PCIE + UNKNOWN +} + +""" +The SMART (Self-Monitoring, Analysis and Reporting Technology) status of the disk +""" +enum DiskSmartStatus { + OK + UNKNOWN +} + +type KeyFile { + location: String + contents: String +} + +type Registration implements Node { + id: PrefixedID! + type: registrationType + keyFile: KeyFile + state: RegistrationState + expiration: String + updateExpiration: String +} + +enum registrationType { + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + INVALID + TRIAL +} + +enum RegistrationState { + TRIAL + BASIC + PLUS + PRO + STARTER + UNLEASHED + LIFETIME + EEXPIRED + EGUID + EGUID1 + ETRIAL + ENOKEYFILE + ENOKEYFILE1 + ENOKEYFILE2 + ENOFLASH + ENOFLASH1 + ENOFLASH2 + ENOFLASH3 + ENOFLASH4 + ENOFLASH5 + ENOFLASH6 + ENOFLASH7 + EBLACKLISTED + EBLACKLISTED1 + EBLACKLISTED2 + ENOCONN +} + +type Vars implements Node { + id: PrefixedID! 
+ + """Unraid version""" + version: String + maxArraysz: Int + maxCachesz: Int + + """Machine hostname""" + name: String + timeZone: String + comment: String + security: String + workgroup: String + domain: String + domainShort: String + hideDotFiles: Boolean + localMaster: Boolean + enableFruit: String + + """Should a NTP server be used for time sync?""" + useNtp: Boolean + + """NTP Server 1""" + ntpServer1: String + + """NTP Server 2""" + ntpServer2: String + + """NTP Server 3""" + ntpServer3: String + + """NTP Server 4""" + ntpServer4: String + domainLogin: String + sysModel: String + sysArraySlots: Int + sysCacheSlots: Int + sysFlashSlots: Int + useSsl: Boolean + + """Port for the webui via HTTP""" + port: Int + + """Port for the webui via HTTPS""" + portssl: Int + localTld: String + bindMgt: Boolean + + """Should telnet be enabled?""" + useTelnet: Boolean + porttelnet: Int + useSsh: Boolean + portssh: Int + startPage: String + startArray: Boolean + spindownDelay: String + queueDepth: String + spinupGroups: Boolean + defaultFormat: String + defaultFsType: String + shutdownTimeout: Int + luksKeyfile: String + pollAttributes: String + pollAttributesDefault: String + pollAttributesStatus: String + nrRequests: Int + nrRequestsDefault: Int + nrRequestsStatus: String + mdNumStripes: Int + mdNumStripesDefault: Int + mdNumStripesStatus: String + mdSyncWindow: Int + mdSyncWindowDefault: Int + mdSyncWindowStatus: String + mdSyncThresh: Int + mdSyncThreshDefault: Int + mdSyncThreshStatus: String + mdWriteMethod: Int + mdWriteMethodDefault: String + mdWriteMethodStatus: String + shareDisk: String + shareUser: String + shareUserInclude: String + shareUserExclude: String + shareSmbEnabled: Boolean + shareNfsEnabled: Boolean + shareAfpEnabled: Boolean + shareInitialOwner: String + shareInitialGroup: String + shareCacheEnabled: Boolean + shareCacheFloor: String + shareMoverSchedule: String + shareMoverLogging: Boolean + fuseRemember: String + fuseRememberDefault: String + 
fuseRememberStatus: String + fuseDirectio: String + fuseDirectioDefault: String + fuseDirectioStatus: String + shareAvahiEnabled: Boolean + shareAvahiSmbName: String + shareAvahiSmbModel: String + shareAvahiAfpName: String + shareAvahiAfpModel: String + safeMode: Boolean + startMode: String + configValid: Boolean + configError: ConfigErrorState + joinStatus: String + deviceCount: Int + flashGuid: String + flashProduct: String + flashVendor: String + regCheck: String + regFile: String + regGuid: String + regTy: registrationType + regState: RegistrationState + + """Registration owner""" + regTo: String + regTm: String + regTm2: String + regGen: String + sbName: String + sbVersion: String + sbUpdated: String + sbEvents: Int + sbState: String + sbClean: Boolean + sbSynced: Int + sbSyncErrs: Int + sbSynced2: Int + sbSyncExit: String + sbNumDisks: Int + mdColor: String + mdNumDisks: Int + mdNumDisabled: Int + mdNumInvalid: Int + mdNumMissing: Int + mdNumNew: Int + mdNumErased: Int + mdResync: Int + mdResyncCorr: String + mdResyncPos: String + mdResyncDb: String + mdResyncDt: String + mdResyncAction: String + mdResyncSize: Int + mdState: String + mdVersion: String + cacheNumDevices: Int + cacheSbNumDisks: Int + fsState: String + + """Human friendly string of array events happening""" + fsProgress: String + + """ + Percentage from 0 - 100 while upgrading a disk or swapping parity drives + """ + fsCopyPrcnt: Int + fsNumMounted: Int + fsNumUnmountable: Int + fsUnmountableMask: String + + """Total amount of user shares""" + shareCount: Int + + """Total amount shares with SMB enabled""" + shareSmbCount: Int + + """Total amount shares with NFS enabled""" + shareNfsCount: Int + + """Total amount shares with AFP enabled""" + shareAfpCount: Int + shareMoverActive: Boolean + csrfToken: String +} + +"""Possible error states for configuration""" +enum ConfigErrorState { + UNKNOWN_ERROR + INELIGIBLE + INVALID + NO_KEY_SERVER + WITHDRAWN +} + +type ApiConfig { + version: String! 
+ extraOrigins: [String!]! + sandbox: Boolean + ssoSubIds: [String!]! + plugins: [String!]! +} + +type Permission { + resource: Resource! + + """Actions allowed on this resource""" + actions: [AuthAction!]! +} + +"""Available resources for permissions""" +enum Resource { + ACTIVATION_CODE + API_KEY + ARRAY + CLOUD + CONFIG + CONNECT + CONNECT__REMOTE_ACCESS + CUSTOMIZATIONS + DASHBOARD + DISK + DISPLAY + DOCKER + FLASH + INFO + LOGS + ME + NETWORK + NOTIFICATIONS + ONLINE + OS + OWNER + PERMISSION + REGISTRATION + SERVERS + SERVICES + SHARE + VARS + VMS + WELCOME +} + +"""Authentication actions with possession (e.g., create:any, read:own)""" +enum AuthAction { + """Create any resource""" + CREATE_ANY + + """Create own resource""" + CREATE_OWN + + """Read any resource""" + READ_ANY + + """Read own resource""" + READ_OWN + + """Update any resource""" + UPDATE_ANY + + """Update own resource""" + UPDATE_OWN + + """Delete any resource""" + DELETE_ANY + + """Delete own resource""" + DELETE_OWN +} + +type ApiKey implements Node { + id: PrefixedID! + key: String! + name: String! + description: String + roles: [Role!]! + createdAt: String! + permissions: [Permission!]! +} + +"""Available roles for API keys and users""" +enum Role { + """Full administrative access to all resources""" + ADMIN + + """Internal Role for Unraid Connect""" + CONNECT + + """Basic read access to user profile only""" + GUEST + + """Read-only access to all resources""" + VIEWER +} + +type NotificationCounts { + info: Int! + warning: Int! + alert: Int! + total: Int! +} + +type NotificationOverview { + unread: NotificationCounts! + archive: NotificationCounts! +} + +type Notification implements Node { + id: PrefixedID! + + """Also known as 'event'""" + title: String! + subject: String! + description: String! + importance: NotificationImportance! + link: String + type: NotificationType! 
+ + """ISO Timestamp for when the notification occurred""" + timestamp: String + formattedTimestamp: String +} + +enum NotificationImportance { + ALERT + INFO + WARNING +} + +enum NotificationType { + UNREAD + ARCHIVE +} + +type Notifications implements Node { + id: PrefixedID! + + """A cached overview of the notifications in the system & their severity.""" + overview: NotificationOverview! + list(filter: NotificationFilter!): [Notification!]! +} + +input NotificationFilter { + importance: NotificationImportance + type: NotificationType! + offset: Int! + limit: Int! +} + +type SsoSettings implements Node { + id: PrefixedID! + + """List of configured OIDC providers""" + oidcProviders: [OidcProvider!]! +} + +type UnifiedSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the settings""" + dataSchema: JSON! + + """The UI schema for the settings""" + uiSchema: JSON! + + """The current values of the settings""" + values: JSON! +} + +interface FormSchema { + """The data schema for the form""" + dataSchema: JSON! + + """The UI schema for the form""" + uiSchema: JSON! + + """The current values of the form""" + values: JSON! +} + +""" +The `JSON` scalar type represents JSON values as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf). +""" +scalar JSON + +type ApiKeyFormSettings implements Node & FormSchema { + id: PrefixedID! + + """The data schema for the API key form""" + dataSchema: JSON! + + """The UI schema for the API key form""" + uiSchema: JSON! + + """The current values of the API key form""" + values: JSON! +} + +type UpdateSettingsResponse { + """Whether a restart is required for the changes to take effect""" + restartRequired: Boolean! + + """The updated settings values""" + values: JSON! + + """Warning messages about configuration issues found during validation""" + warnings: [String!] +} + +type Settings implements Node { + id: PrefixedID! 
+ + """A view of all settings""" + unified: UnifiedSettings! + + """SSO settings""" + sso: SsoSettings! + + """The API setting values""" + api: ApiConfig! +} + +type RCloneDrive { + """Provider name""" + name: String! + + """Provider options and configuration schema""" + options: JSON! +} + +type RCloneBackupConfigForm { + id: ID! + dataSchema: JSON! + uiSchema: JSON! +} + +type RCloneBackupSettings { + configForm(formOptions: RCloneConfigFormInput): RCloneBackupConfigForm! + drives: [RCloneDrive!]! + remotes: [RCloneRemote!]! +} + +input RCloneConfigFormInput { + providerType: String + showAdvanced: Boolean = false + parameters: JSON +} + +type RCloneRemote { + name: String! + type: String! + parameters: JSON! + + """Complete remote configuration""" + config: JSON! +} + +type ArrayMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Set array state + """ + setState(input: ArrayStateInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Add new disk to array + """ + addDiskToArray(input: ArrayDiskInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Remove existing disk from array. NOTE: The array must be stopped before running this otherwise it'll throw an error. + """ + removeDiskFromArray(input: ArrayDiskInput!): UnraidArray! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Mount a disk in the array + """ + mountArrayDisk(id: PrefixedID!): ArrayDisk! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Unmount a disk from the array + """ + unmountArrayDisk(id: PrefixedID!): ArrayDisk! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Clear statistics for a disk in the array + """ + clearArrayDiskStatistics(id: PrefixedID!): Boolean! +} + +input ArrayStateInput { + """Array state""" + desiredState: ArrayStateInputState! +} + +enum ArrayStateInputState { + START + STOP +} + +input ArrayDiskInput { + """Disk ID""" + id: PrefixedID! + + """The slot for the disk""" + slot: Int +} + +type DockerMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **DOCKER** + + #### Description: + + Start a container + """ + start(id: PrefixedID!): DockerContainer! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **DOCKER** + + #### Description: + + Stop a container + """ + stop(id: PrefixedID!): DockerContainer! +} + +type VmMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Start a virtual machine + """ + start(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Stop a virtual machine + """ + stop(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Pause a virtual machine + """ + pause(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Resume a virtual machine + """ + resume(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Force stop a virtual machine + """ + forceStop(id: PrefixedID!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Reboot a virtual machine + """ + reboot(id: PrefixedID!): Boolean! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **VMS** + + #### Description: + + Reset a virtual machine + """ + reset(id: PrefixedID!): Boolean! +} + +"""API Key related mutations""" +type ApiKeyMutations { + """ + #### Required Permissions: + + - Action: **CREATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Create an API key + """ + create(input: CreateApiKeyInput!): ApiKey! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Add a role to an API key + """ + addRole(input: AddRoleForApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Remove a role from an API key + """ + removeRole(input: RemoveRoleFromApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **API_KEY** + + #### Description: + + Delete one or more API keys + """ + delete(input: DeleteApiKeyInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **API_KEY** + + #### Description: + + Update an API key + """ + update(input: UpdateApiKeyInput!): ApiKey! +} + +input CreateApiKeyInput { + name: String! + description: String + roles: [Role!] + permissions: [AddPermissionInput!] + + """ + This will replace the existing key if one already exists with the same name, otherwise returns the existing key + """ + overwrite: Boolean +} + +input AddPermissionInput { + resource: Resource! + actions: [AuthAction!]! +} + +input AddRoleForApiKeyInput { + apiKeyId: PrefixedID! + role: Role! +} + +input RemoveRoleFromApiKeyInput { + apiKeyId: PrefixedID! + role: Role! +} + +input DeleteApiKeyInput { + ids: [PrefixedID!]! +} + +input UpdateApiKeyInput { + id: PrefixedID! + name: String + description: String + roles: [Role!] + permissions: [AddPermissionInput!] 
+} + +"""Customization related mutations""" +type CustomizationMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CUSTOMIZATIONS** + + #### Description: + + Update the UI theme (writes dynamix.cfg) + """ + setTheme( + """Theme to apply""" + theme: ThemeName! + ): Theme! +} + +"""The theme name""" +enum ThemeName { + azure + black + gray + white +} + +""" +Parity check related mutations, WIP, response types and functionality will change +""" +type ParityCheckMutations { + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Start a parity check + """ + start(correct: Boolean!): JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Pause a parity check + """ + pause: JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Resume a parity check + """ + resume: JSON! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **ARRAY** + + #### Description: + + Cancel a parity check + """ + cancel: JSON! +} + +"""RClone related mutations""" +type RCloneMutations { + """ + #### Required Permissions: + + - Action: **CREATE_ANY** + - Resource: **FLASH** + + #### Description: + + Create a new RClone remote + """ + createRCloneRemote(input: CreateRCloneRemoteInput!): RCloneRemote! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **FLASH** + + #### Description: + + Delete an existing RClone remote + """ + deleteRCloneRemote(input: DeleteRCloneRemoteInput!): Boolean! +} + +input CreateRCloneRemoteInput { + name: String! + type: String! + parameters: JSON! +} + +input DeleteRCloneRemoteInput { + name: String! +} + +type Config implements Node { + id: PrefixedID! 
+ valid: Boolean + error: String +} + +type PublicPartnerInfo { + partnerName: String + + """Indicates if a partner logo exists""" + hasPartnerLogo: Boolean! + partnerUrl: String + + """ + The path to the partner logo image on the flash drive, relative to the activation code file + """ + partnerLogoUrl: String +} + +type ActivationCode { + code: String + partnerName: String + partnerUrl: String + serverName: String + sysModel: String + comment: String + header: String + headermetacolor: String + background: String + showBannerGradient: Boolean + theme: String +} + +type Customization { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ACTIVATION_CODE** + """ + activationCode: ActivationCode + partnerInfo: PublicPartnerInfo + theme: Theme! +} + +type Theme { + """The theme name""" + name: ThemeName! + + """Whether to show the header banner image""" + showBannerImage: Boolean! + + """Whether to show the banner gradient""" + showBannerGradient: Boolean! + + """Whether to show the description in the header""" + showHeaderDescription: Boolean! + + """The background color of the header""" + headerBackgroundColor: String + + """The text color of the header""" + headerPrimaryTextColor: String + + """The secondary text color of the header""" + headerSecondaryTextColor: String +} + +type ExplicitStatusItem { + name: String! + updateStatus: UpdateStatus! +} + +"""Update status of a container.""" +enum UpdateStatus { + UP_TO_DATE + UPDATE_AVAILABLE + REBUILD_READY + UNKNOWN +} + +type ContainerPort { + ip: String + privatePort: Port + publicPort: Port + type: ContainerPortType! +} + +""" +A field whose value is a valid TCP port within the range of 0 to 65535: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_ports +""" +scalar Port + +enum ContainerPortType { + TCP + UDP +} + +type ContainerHostConfig { + networkMode: String! +} + +type DockerContainer implements Node { + id: PrefixedID! + names: [String!]! + image: String! 
+ imageId: String! + command: String! + created: Int! + ports: [ContainerPort!]! + + """Total size of all files in the container (in bytes)""" + sizeRootFs: BigInt + labels: JSON + state: ContainerState! + status: String! + hostConfig: ContainerHostConfig + networkSettings: JSON + mounts: [JSON!] + autoStart: Boolean! +} + +enum ContainerState { + RUNNING + EXITED +} + +type DockerNetwork implements Node { + id: PrefixedID! + name: String! + created: String! + scope: String! + driver: String! + enableIPv6: Boolean! + ipam: JSON! + internal: Boolean! + attachable: Boolean! + ingress: Boolean! + configFrom: JSON! + configOnly: Boolean! + containers: JSON! + options: JSON! + labels: JSON! +} + +type Docker implements Node { + id: PrefixedID! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + containers(skipCache: Boolean! = false): [DockerContainer!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + networks(skipCache: Boolean! = false): [DockerNetwork!]! +} + +type ResolvedOrganizerView { + id: String! + name: String! + root: ResolvedOrganizerEntry! + prefs: JSON +} + +union ResolvedOrganizerEntry = ResolvedOrganizerFolder | OrganizerContainerResource | OrganizerResource + +type ResolvedOrganizerFolder { + id: String! + type: String! + name: String! + children: [ResolvedOrganizerEntry!]! +} + +type OrganizerContainerResource { + id: String! + type: String! + name: String! + meta: DockerContainer +} + +type OrganizerResource { + id: String! + type: String! + name: String! + meta: JSON +} + +type ResolvedOrganizerV1 { + version: Float! + views: [ResolvedOrganizerView!]! +} + +type FlashBackupStatus { + """Status message indicating the outcome of the backup initiation.""" + status: String! + + """Job ID if available, can be used to check job status.""" + jobId: String +} + +type Flash implements Node { + id: PrefixedID! + guid: String! + vendor: String! + product: String! 
+} + +type InfoGpu implements Node { + id: PrefixedID! + + """GPU type/manufacturer""" + type: String! + + """GPU type identifier""" + typeid: String! + + """Whether GPU is blacklisted""" + blacklisted: Boolean! + + """Device class""" + class: String! + + """Product ID""" + productid: String! + + """Vendor name""" + vendorname: String +} + +type InfoNetwork implements Node { + id: PrefixedID! + + """Network interface name""" + iface: String! + + """Network interface model""" + model: String + + """Network vendor""" + vendor: String + + """MAC address""" + mac: String + + """Virtual interface flag""" + virtual: Boolean + + """Network speed""" + speed: String + + """DHCP enabled flag""" + dhcp: Boolean +} + +type InfoPci implements Node { + id: PrefixedID! + + """Device type/manufacturer""" + type: String! + + """Type identifier""" + typeid: String! + + """Vendor name""" + vendorname: String + + """Vendor ID""" + vendorid: String! + + """Product name""" + productname: String + + """Product ID""" + productid: String! + + """Blacklisted status""" + blacklisted: String! + + """Device class""" + class: String! +} + +type InfoUsb implements Node { + id: PrefixedID! + + """USB device name""" + name: String! + + """USB bus number""" + bus: String + + """USB device number""" + device: String +} + +type InfoDevices implements Node { + id: PrefixedID! + + """List of GPU devices""" + gpu: [InfoGpu!] + + """List of network interfaces""" + network: [InfoNetwork!] + + """List of PCI devices""" + pci: [InfoPci!] + + """List of USB devices""" + usb: [InfoUsb!] +} + +type InfoDisplayCase implements Node { + id: PrefixedID! + + """Case image URL""" + url: String! + + """Case icon identifier""" + icon: String! + + """Error message if any""" + error: String! + + """Base64 encoded case image""" + base64: String! +} + +type InfoDisplay implements Node { + id: PrefixedID! + + """Case display configuration""" + case: InfoDisplayCase! + + """UI theme name""" + theme: ThemeName! 
+ + """Temperature unit (C or F)""" + unit: Temperature! + + """Enable UI scaling""" + scale: Boolean! + + """Show tabs in UI""" + tabs: Boolean! + + """Enable UI resize""" + resize: Boolean! + + """Show WWN identifiers""" + wwn: Boolean! + + """Show totals""" + total: Boolean! + + """Show usage statistics""" + usage: Boolean! + + """Show text labels""" + text: Boolean! + + """Warning temperature threshold""" + warning: Int! + + """Critical temperature threshold""" + critical: Int! + + """Hot temperature threshold""" + hot: Int! + + """Maximum temperature threshold""" + max: Int + + """Locale setting""" + locale: String +} + +"""Temperature unit""" +enum Temperature { + CELSIUS + FAHRENHEIT +} + +"""CPU load for a single core""" +type CpuLoad { + """The total CPU load on a single core, in percent.""" + percentTotal: Float! + + """The percentage of time the CPU spent in user space.""" + percentUser: Float! + + """The percentage of time the CPU spent in kernel space.""" + percentSystem: Float! + + """ + The percentage of time the CPU spent on low-priority (niced) user space processes. + """ + percentNice: Float! + + """The percentage of time the CPU was idle.""" + percentIdle: Float! + + """The percentage of time the CPU spent servicing hardware interrupts.""" + percentIrq: Float! + + """The percentage of time the CPU spent running virtual machines (guest).""" + percentGuest: Float! + + """The percentage of CPU time stolen by the hypervisor.""" + percentSteal: Float! +} + +type CpuPackages implements Node { + id: PrefixedID! + + """Total CPU package power draw (W)""" + totalPower: Float! + + """Power draw per package (W)""" + power: [Float!]! + + """Temperature per package (°C)""" + temp: [Float!]! +} + +type CpuUtilization implements Node { + id: PrefixedID! + + """Total CPU load in percent""" + percentTotal: Float! + + """CPU load for each core""" + cpus: [CpuLoad!]! +} + +type InfoCpu implements Node { + id: PrefixedID! 
+ + """CPU manufacturer""" + manufacturer: String + + """CPU brand name""" + brand: String + + """CPU vendor""" + vendor: String + + """CPU family""" + family: String + + """CPU model""" + model: String + + """CPU stepping""" + stepping: Int + + """CPU revision""" + revision: String + + """CPU voltage""" + voltage: String + + """Current CPU speed in GHz""" + speed: Float + + """Minimum CPU speed in GHz""" + speedmin: Float + + """Maximum CPU speed in GHz""" + speedmax: Float + + """Number of CPU threads""" + threads: Int + + """Number of CPU cores""" + cores: Int + + """Number of physical processors""" + processors: Int + + """CPU socket type""" + socket: String + + """CPU cache information""" + cache: JSON + + """CPU feature flags""" + flags: [String!] + + """ + Per-package array of core/thread pairs, e.g. [[[0,1],[2,3]], [[4,5],[6,7]]] + """ + topology: [[[Int!]!]!]! + packages: CpuPackages! +} + +type MemoryLayout implements Node { + id: PrefixedID! + + """Memory module size in bytes""" + size: BigInt! + + """Memory bank location (e.g., BANK 0)""" + bank: String + + """Memory type (e.g., DDR4, DDR5)""" + type: String + + """Memory clock speed in MHz""" + clockSpeed: Int + + """Part number of the memory module""" + partNum: String + + """Serial number of the memory module""" + serialNum: String + + """Memory manufacturer""" + manufacturer: String + + """Form factor (e.g., DIMM, SODIMM)""" + formFactor: String + + """Configured voltage in millivolts""" + voltageConfigured: Int + + """Minimum voltage in millivolts""" + voltageMin: Int + + """Maximum voltage in millivolts""" + voltageMax: Int +} + +type MemoryUtilization implements Node { + id: PrefixedID! + + """Total system memory in bytes""" + total: BigInt! + + """Used memory in bytes""" + used: BigInt! + + """Free memory in bytes""" + free: BigInt! + + """Available memory in bytes""" + available: BigInt! + + """Active memory in bytes""" + active: BigInt! 
+ + """Buffer/cache memory in bytes""" + buffcache: BigInt! + + """Memory usage percentage""" + percentTotal: Float! + + """Total swap memory in bytes""" + swapTotal: BigInt! + + """Used swap memory in bytes""" + swapUsed: BigInt! + + """Free swap memory in bytes""" + swapFree: BigInt! + + """Swap usage percentage""" + percentSwapTotal: Float! +} + +type InfoMemory implements Node { + id: PrefixedID! + + """Physical memory layout""" + layout: [MemoryLayout!]! +} + +type InfoOs implements Node { + id: PrefixedID! + + """Operating system platform""" + platform: String + + """Linux distribution name""" + distro: String + + """OS release version""" + release: String + + """OS codename""" + codename: String + + """Kernel version""" + kernel: String + + """OS architecture""" + arch: String + + """Hostname""" + hostname: String + + """Fully qualified domain name""" + fqdn: String + + """OS build identifier""" + build: String + + """Service pack version""" + servicepack: String + + """Boot time ISO string""" + uptime: String + + """OS logo name""" + logofile: String + + """OS serial number""" + serial: String + + """OS started via UEFI""" + uefi: Boolean +} + +type InfoSystem implements Node { + id: PrefixedID! + + """System manufacturer""" + manufacturer: String + + """System model""" + model: String + + """System version""" + version: String + + """System serial number""" + serial: String + + """System UUID""" + uuid: String + + """System SKU""" + sku: String + + """Virtual machine flag""" + virtual: Boolean +} + +type InfoBaseboard implements Node { + id: PrefixedID! 
+ + """Motherboard manufacturer""" + manufacturer: String + + """Motherboard model""" + model: String + + """Motherboard version""" + version: String + + """Motherboard serial number""" + serial: String + + """Motherboard asset tag""" + assetTag: String + + """Maximum memory capacity in bytes""" + memMax: Float + + """Number of memory slots""" + memSlots: Float +} + +type CoreVersions { + """Unraid version""" + unraid: String + + """Unraid API version""" + api: String + + """Kernel version""" + kernel: String +} + +type PackageVersions { + """OpenSSL version""" + openssl: String + + """Node.js version""" + node: String + + """npm version""" + npm: String + + """pm2 version""" + pm2: String + + """Git version""" + git: String + + """nginx version""" + nginx: String + + """PHP version""" + php: String + + """Docker version""" + docker: String +} + +type InfoVersions implements Node { + id: PrefixedID! + + """Core system versions""" + core: CoreVersions! + + """Software package versions""" + packages: PackageVersions +} + +type Info implements Node { + id: PrefixedID! + + """Current server time""" + time: DateTime! + + """Motherboard information""" + baseboard: InfoBaseboard! + + """CPU information""" + cpu: InfoCpu! + + """Device information""" + devices: InfoDevices! + + """Display configuration""" + display: InfoDisplay! + + """Machine ID""" + machineId: ID + + """Memory information""" + memory: InfoMemory! + + """Operating system information""" + os: InfoOs! + + """System information""" + system: InfoSystem! + + """Software versions""" + versions: InfoVersions! +} + +type LogFile { + """Name of the log file""" + name: String! + + """Full path to the log file""" + path: String! + + """Size of the log file in bytes""" + size: Int! + + """Last modified timestamp""" + modifiedAt: DateTime! +} + +type LogFileContent { + """Path to the log file""" + path: String! + + """Content of the log file""" + content: String! 
+ + """Total number of lines in the file""" + totalLines: Int! + + """Starting line number of the content (1-indexed)""" + startLine: Int +} + +"""System metrics including CPU and memory utilization""" +type Metrics implements Node { + id: PrefixedID! + + """Current CPU utilization metrics""" + cpu: CpuUtilization + + """Current memory utilization metrics""" + memory: MemoryUtilization +} + +type Owner { + username: String! + url: String! + avatar: String! +} + +type ProfileModel implements Node { + id: PrefixedID! + username: String! + url: String! + avatar: String! +} + +type Server implements Node { + id: PrefixedID! + owner: ProfileModel! + guid: String! + apikey: String! + name: String! + + """Whether this server is online or offline""" + status: ServerStatus! + wanip: String! + lanip: String! + localurl: String! + remoteurl: String! +} + +enum ServerStatus { + ONLINE + OFFLINE + NEVER_CONNECTED +} + +type OidcAuthorizationRule { + """The claim to check (e.g., email, sub, groups, hd)""" + claim: String! + + """The comparison operator""" + operator: AuthorizationOperator! + + """The value(s) to match against""" + value: [String!]! +} + +"""Operators for authorization rule matching""" +enum AuthorizationOperator { + EQUALS + CONTAINS + ENDS_WITH + STARTS_WITH +} + +type OidcProvider { + """The unique identifier for the OIDC provider""" + id: PrefixedID! + + """Display name of the OIDC provider""" + name: String! + + """OAuth2 client ID registered with the provider""" + clientId: String! + + """OAuth2 client secret (if required by provider)""" + clientSecret: String + + """ + OIDC issuer URL (e.g., https://accounts.google.com). Required for auto-discovery via /.well-known/openid-configuration + """ + issuer: String + + """ + OAuth2 authorization endpoint URL. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + authorizationEndpoint: String + + """ + OAuth2 token endpoint URL. 
If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + tokenEndpoint: String + + """ + JSON Web Key Set URI for token validation. If omitted, will be auto-discovered from issuer/.well-known/openid-configuration + """ + jwksUri: String + + """OAuth2 scopes to request (e.g., openid, profile, email)""" + scopes: [String!]! + + """Flexible authorization rules based on claims""" + authorizationRules: [OidcAuthorizationRule!] + + """ + Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass). Defaults to OR. + """ + authorizationRuleMode: AuthorizationRuleMode + + """Custom text for the login button""" + buttonText: String + + """URL or base64 encoded icon for the login button""" + buttonIcon: String + + """ + Button variant style from Reka UI. See https://reka-ui.com/docs/components/button + """ + buttonVariant: String + + """ + Custom CSS styles for the button (e.g., "background: linear-gradient(to right, #4f46e5, #7c3aed); border-radius: 9999px;") + """ + buttonStyle: String +} + +""" +Mode for evaluating authorization rules - OR (any rule passes) or AND (all rules must pass) +""" +enum AuthorizationRuleMode { + OR + AND +} + +type OidcConfiguration { + """List of configured OIDC providers""" + providers: [OidcProvider!]! + + """ + Default allowed redirect origins that apply to all OIDC providers (e.g., Tailscale domains) + """ + defaultAllowedOrigins: [String!] +} + +type OidcSessionValidation { + valid: Boolean! + username: String +} + +type PublicOidcProvider { + id: ID! + name: String! + buttonText: String + buttonIcon: String + buttonVariant: String + buttonStyle: String +} + +type UPSBattery { + """ + Battery charge level as a percentage (0-100). Unit: percent (%). Example: 100 means battery is fully charged + """ + chargeLevel: Int! + + """ + Estimated runtime remaining on battery power. Unit: seconds. Example: 3600 means 1 hour of runtime remaining + """ + estimatedRuntime: Int! 
+ + """ + Battery health status. Possible values: 'Good', 'Replace', 'Unknown'. Indicates if the battery needs replacement + """ + health: String! +} + +type UPSPower { + """ + Input voltage from the wall outlet/mains power. Unit: volts (V). Example: 120.5 for typical US household voltage + """ + inputVoltage: Float! + + """ + Output voltage being delivered to connected devices. Unit: volts (V). Example: 120.5 - should match input voltage when on mains power + """ + outputVoltage: Float! + + """ + Current load on the UPS as a percentage of its capacity. Unit: percent (%). Example: 25 means UPS is loaded at 25% of its maximum capacity + """ + loadPercentage: Int! +} + +type UPSDevice { + """ + Unique identifier for the UPS device. Usually based on the model name or a generated ID + """ + id: ID! + + """Display name for the UPS device. Can be customized by the user""" + name: String! + + """UPS model name/number. Example: 'APC Back-UPS Pro 1500'""" + model: String! + + """ + Current operational status of the UPS. Common values: 'Online', 'On Battery', 'Low Battery', 'Replace Battery', 'Overload', 'Offline'. 'Online' means running on mains power, 'On Battery' means running on battery backup + """ + status: String! + + """Battery-related information""" + battery: UPSBattery! + + """Power-related information""" + power: UPSPower! +} + +type UPSConfiguration { + """ + UPS service state. Values: 'enable' or 'disable'. Controls whether the UPS monitoring service is running + """ + service: String + + """ + Type of cable connecting the UPS to the server. Common values: 'usb', 'smart', 'ether', 'custom'. Determines communication protocol + """ + upsCable: String + + """ + Custom cable configuration string. Only used when upsCable is set to 'custom'. Format depends on specific UPS model + """ + customUpsCable: String + + """ + UPS communication type. Common values: 'usb', 'net', 'snmp', 'dumb', 'pcnet', 'modbus'. 
Defines how the server communicates with the UPS + """ + upsType: String + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network. Depends on upsType setting + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: volt-amperes (VA). Example: 1500 for a 1500VA UPS. Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level threshold for shutdown. Unit: percent (%). Example: 10 means shutdown when battery reaches 10%. System will shutdown when battery drops to this level + """ + batteryLevel: Int + + """ + Runtime threshold for shutdown. Unit: minutes. Example: 5 means shutdown when 5 minutes runtime remaining. System will shutdown when estimated runtime drops below this + """ + minutes: Int + + """ + Timeout for UPS communications. Unit: seconds. Example: 0 means no timeout. Time to wait for UPS response before considering it offline + """ + timeout: Int + + """ + Kill UPS power after shutdown. Values: 'yes' or 'no'. If 'yes', tells UPS to cut power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: String + + """ + Network Information Server (NIS) IP address. Default: '0.0.0.0' (listen on all interfaces). IP address for apcupsd network information server + """ + nisIp: String + + """ + Network server mode. Values: 'on' or 'off'. Enable to allow network clients to monitor this UPS + """ + netServer: String + + """ + UPS name for network monitoring. Used to identify this UPS on the network. Example: 'SERVER_UPS' + """ + upsName: String + + """ + Override UPS model name. Used for display purposes. Leave unset to use UPS-reported model + """ + modelName: String +} + +type VmDomain implements Node { + """The unique identifier for the vm (uuid)""" + id: PrefixedID! + + """A friendly name for the vm""" + name: String + + """Current domain vm state""" + state: VmState! 
+ + """The UUID of the vm""" + uuid: String @deprecated(reason: "Use id instead") +} + +"""The state of a virtual machine""" +enum VmState { + NOSTATE + RUNNING + IDLE + PAUSED + SHUTDOWN + SHUTOFF + CRASHED + PMSUSPENDED +} + +type Vms implements Node { + id: PrefixedID! + domains: [VmDomain!] + domain: [VmDomain!] +} + +type Uptime { + timestamp: String +} + +type Service implements Node { + id: PrefixedID! + name: String + online: Boolean + uptime: Uptime + version: String +} + +type UserAccount implements Node { + id: PrefixedID! + + """The name of the user""" + name: String! + + """A description of the user""" + description: String! + + """The roles of the user""" + roles: [Role!]! + + """The permissions of the user""" + permissions: [Permission!] +} + +type Plugin { + """The name of the plugin package""" + name: String! + + """The version of the plugin package""" + version: String! + + """Whether the plugin has an API module""" + hasApiModule: Boolean + + """Whether the plugin has a CLI module""" + hasCliModule: Boolean +} + +""" +### Description: + +ID scalar type that prefixes the underlying ID with the server identifier on output and strips it on input. + +We use this scalar type to ensure that the ID is unique across all servers, allowing the same underlying resource ID to be used across different server instances. + +#### Input Behavior: + +When providing an ID as input (e.g., in arguments or input objects), the server identifier prefix (':') is optional. + +- If the prefix is present (e.g., '123:456'), it will be automatically stripped, and only the underlying ID ('456') will be used internally. +- If the prefix is absent (e.g., '456'), the ID will be used as-is. + +This makes it flexible for clients, as they don't strictly need to know or provide the server ID. + +#### Output Behavior: + +When an ID is returned in the response (output), it will *always* be prefixed with the current server's unique identifier (e.g., '123:456'). 
+ +#### Example: + +Note: The server identifier is '123' in this example. + +##### Input (Prefix Optional): +```graphql +# Both of these are valid inputs resolving to internal ID '456' +{ + someQuery(id: "123:456") { ... } + anotherQuery(id: "456") { ... } +} +``` + +##### Output (Prefix Always Added): +```graphql +# Assuming internal ID is '456' +{ + "data": { + "someResource": { + "id": "123:456" + } + } +} +``` +""" +scalar PrefixedID + +type Query { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + """ + apiKeys: [ApiKey!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + """ + apiKey(id: PrefixedID!): ApiKey + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + All possible roles for API keys + """ + apiKeyPossibleRoles: [Role!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + All possible permissions for API keys + """ + apiKeyPossiblePermissions: [Permission!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + Get the actual permissions that would be granted by a set of roles + """ + getPermissionsForRoles(roles: [Role!]!): [Permission!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **PERMISSION** + + #### Description: + + Preview the effective permissions for a combination of roles and explicit permissions + """ + previewEffectivePermissions(roles: [Role!], permissions: [AddPermissionInput!]): [Permission!]! + + """Get all available authentication actions with possession""" + getAvailableAuthActions: [AuthAction!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **API_KEY** + + #### Description: + + Get JSON Schema for API key creation form + """ + getApiKeyCreationFormSchema: ApiKeyFormSettings! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + """ + config: Config! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **FLASH** + """ + flash: Flash! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ME** + """ + me: UserAccount! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + + #### Description: + + Get all notifications + """ + notifications: Notifications! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ONLINE** + """ + online: Boolean! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **OWNER** + """ + owner: Owner! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **REGISTRATION** + """ + registration: Registration + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + server: Server + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + servers: [Server!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVICES** + """ + services: [Service!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SHARE** + """ + shares: [Share!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **VARS** + """ + vars: Vars! + isInitialSetup: Boolean! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **VMS** + + #### Description: + + Get information about all VMs on the system + """ + vms: Vms! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + parityHistory: [ParityCheck!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + array: UnraidArray! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CUSTOMIZATIONS** + """ + customization: Customization + publicPartnerInfo: PublicPartnerInfo + publicTheme: Theme! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DOCKER** + """ + docker: Docker! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DISK** + """ + disks: [Disk!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **DISK** + """ + disk(id: PrefixedID!): Disk! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **FLASH** + """ + rclone: RCloneBackupSettings! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + info: Info! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFiles: [LogFile!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFile(path: String!, lines: Int, startLine: Int): LogFileContent! + settings: Settings! + isSSOEnabled: Boolean! + + """Get public OIDC provider information for login buttons""" + publicOidcProviders: [PublicOidcProvider!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get all configured OIDC providers (admin only) + """ + oidcProviders: [OidcProvider!]! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get a specific OIDC provider by ID + """ + oidcProvider(id: PrefixedID!): OidcProvider + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Get the full OIDC configuration (admin only) + """ + oidcConfiguration: OidcConfiguration! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + Validate an OIDC session token (internal use for CLI validation) + """ + validateOidcSession(token: String!): OidcSessionValidation! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + metrics: Metrics! + upsDevices: [UPSDevice!]! + upsDeviceById(id: String!): UPSDevice + upsConfiguration: UPSConfiguration! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **CONFIG** + + #### Description: + + List all installed plugins with their metadata + """ + plugins: [Plugin!]! +} + +type Mutation { + """Creates a new notification record""" + createNotification(input: NotificationData!): Notification! + deleteNotification(id: PrefixedID!, type: NotificationType!): NotificationOverview! + + """Deletes all archived notifications on server.""" + deleteArchivedNotifications: NotificationOverview! + + """Marks a notification as archived.""" + archiveNotification(id: PrefixedID!): Notification! + archiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + archiveAll(importance: NotificationImportance): NotificationOverview! + + """Marks a notification as unread.""" + unreadNotification(id: PrefixedID!): Notification! + unarchiveNotifications(ids: [PrefixedID!]!): NotificationOverview! + unarchiveAll(importance: NotificationImportance): NotificationOverview! + + """Reads each notification to recompute & update the overview.""" + recalculateOverview: NotificationOverview! + array: ArrayMutations! + docker: DockerMutations! + vm: VmMutations! + parityCheck: ParityCheckMutations! + apiKey: ApiKeyMutations! + customization: CustomizationMutations! + rclone: RCloneMutations! + + """Initiates a flash drive backup using a configured remote.""" + initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus! 
+ + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CONFIG** + """ + updateSettings(input: JSON!): UpdateSettingsResponse! + configureUps(config: UPSConfigInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **UPDATE_ANY** + - Resource: **CONFIG** + + #### Description: + + Add one or more plugins to the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + addPlugin(input: PluginManagementInput!): Boolean! + + """ + #### Required Permissions: + + - Action: **DELETE_ANY** + - Resource: **CONFIG** + + #### Description: + + Remove one or more plugins from the API. Returns false if restart was triggered automatically, true if manual restart is required. + """ + removePlugin(input: PluginManagementInput!): Boolean! +} + +input NotificationData { + title: String! + subject: String! + description: String! + importance: NotificationImportance! + link: String +} + +input InitiateFlashBackupInput { + """The name of the remote configuration to use for the backup.""" + remoteName: String! + + """Source path to backup (typically the flash drive).""" + sourcePath: String! + + """Destination path on the remote.""" + destinationPath: String! + + """ + Additional options for the backup operation, such as --dry-run or --transfers. + """ + options: JSON +} + +input UPSConfigInput { + """Enable or disable the UPS monitoring service""" + service: UPSServiceState + + """Type of cable connecting the UPS to the server""" + upsCable: UPSCableType + + """ + Custom cable configuration (only used when upsCable is CUSTOM). Format depends on specific UPS model + """ + customUpsCable: String + + """UPS communication protocol""" + upsType: UPSType + + """ + Device path or network address for UPS connection. Examples: '/dev/ttyUSB0' for USB, '192.168.1.100:3551' for network + """ + device: String + + """ + Override UPS capacity for runtime calculations. Unit: watts (W). 
Leave unset to use UPS-reported capacity + """ + overrideUpsCapacity: Int + + """ + Battery level percentage to initiate shutdown. Unit: percent (%) - Valid range: 0-100 + """ + batteryLevel: Int + + """Runtime left in minutes to initiate shutdown. Unit: minutes""" + minutes: Int + + """ + Time on battery before shutdown. Unit: seconds. Set to 0 to disable timeout-based shutdown + """ + timeout: Int + + """ + Turn off UPS power after system shutdown. Useful for ensuring complete power cycle + """ + killUps: UPSKillPower +} + +"""Service state for UPS daemon""" +enum UPSServiceState { + ENABLE + DISABLE +} + +"""UPS cable connection types""" +enum UPSCableType { + USB + SIMPLE + SMART + ETHER + CUSTOM +} + +"""UPS communication protocols""" +enum UPSType { + USB + APCSMART + NET + SNMP + DUMB + PCNET + MODBUS +} + +"""Kill UPS power after shutdown option""" +enum UPSKillPower { + YES + NO +} + +input PluginManagementInput { + """Array of plugin package names to add or remove""" + names: [String!]! + + """ + Whether to treat plugins as bundled plugins. Bundled plugins are installed to node_modules at build time and controlled via config only. + """ + bundled: Boolean! = false + + """ + Whether to restart the API after the operation. When false, a restart has already been queued. + """ + restart: Boolean! = true +} + +type Subscription { + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + """ + notificationAdded: Notification! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **NOTIFICATIONS** + """ + notificationsOverview: NotificationOverview! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **OWNER** + """ + ownerSubscription: Owner! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **SERVERS** + """ + serversSubscription: Server! 
+ + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + parityHistorySubscription: ParityCheck! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **ARRAY** + """ + arraySubscription: UnraidArray! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **LOGS** + """ + logFile(path: String!): LogFileContent! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsCpu: CpuUtilization! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsCpuTelemetry: CpuPackages! + + """ + #### Required Permissions: + + - Action: **READ_ANY** + - Resource: **INFO** + """ + systemMetricsMemory: MemoryUtilization! + upsUpdates: UPSDevice! +} diff --git a/skills/unraid/references/troubleshooting.md b/skills/unraid/references/troubleshooting.md new file mode 100644 index 0000000..9d76811 --- /dev/null +++ b/skills/unraid/references/troubleshooting.md @@ -0,0 +1,34 @@ +# Unraid API Troubleshooting Guide + +Common issues and solutions when working with the Unraid GraphQL API. + +## "Cannot query field" error +Field name doesn't exist in your Unraid version. Use introspection to find valid fields: +```bash +./scripts/unraid-query.sh -q "{ __type(name: \"TypeName\") { fields { name } } }" +``` + +## "API key validation failed" +- Check API key is correct and not truncated +- Verify key has appropriate permissions (use "Viewer" role) +- Ensure URL includes `/graphql` endpoint (e.g. `http://host/graphql`) + +## Empty results +Many queries return empty arrays when no data exists: +- `docker.containers` - No containers running +- `vms` - No VMs configured (or VM service disabled) +- `notifications` - No active alerts +- `plugins` - No plugins installed + +This is normal behavior, not an error. Ensure your scripts handle empty arrays gracefully. 
+ +## "VMs are not available" (GraphQL Error) +If the VM manager is disabled in Unraid settings, querying `{ vms { ... } }` will return a GraphQL error. +**Solution:** Check if VM service is enabled before querying, or use error handling (like `IGNORE_ERRORS=true` in dashboard scripts) to process partial data. + +## URL connection issues +- Use HTTPS (not HTTP) for remote access if configured +- For local access: `http://unraid-server-ip/graphql` +- For Unraid Connect: Use provided URL with token in hostname +- Use `-k` (insecure) with curl if using self-signed certs on local HTTPS +- Use `-L` (follow redirects) if Unraid redirects HTTP to HTTPS diff --git a/skills/unraid/scripts/dashboard.sh b/skills/unraid/scripts/dashboard.sh new file mode 100755 index 0000000..7918809 --- /dev/null +++ b/skills/unraid/scripts/dashboard.sh @@ -0,0 +1,214 @@ +#!/bin/bash +# Complete Unraid Monitoring Dashboard (Multi-Server) +# Gets system status, disk health, and resource usage for all configured servers + +set -euo pipefail + +SCRIPT_DIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +source "$REPO_ROOT/lib/load-env.sh" + +QUERY_SCRIPT="$SCRIPT_DIR/unraid-query.sh" +OUTPUT_FILE="$HOME/memory/bank/unraid-inventory.md" + +# Load credentials from .env for all servers +load_env_file || exit 1 +for server in "TOOTIE" "SHART"; do + url_var="UNRAID_${server}_URL" + key_var="UNRAID_${server}_API_KEY" + name_var="UNRAID_${server}_NAME" + validate_env_vars "$url_var" "$key_var" || exit 1 +done + +# Ensure output directory exists +mkdir -p "$(dirname "$OUTPUT_FILE")" + +# Start the report +echo "# Unraid Fleet Dashboard" > "$OUTPUT_FILE" +echo "Generated at: $(date)" >> "$OUTPUT_FILE" +echo "" >> "$OUTPUT_FILE" + +# Function to process a single server +process_server() { + local NAME="$1" + local URL="$2" + local API_KEY="$3" + + echo "Querying server: $NAME..." 
+ + export UNRAID_URL="$URL" + export UNRAID_API_KEY="$API_KEY" + export IGNORE_ERRORS="true" + + QUERY='query Dashboard { + info { + time + cpu { model cores threads } + os { platform distro release arch } + system { manufacturer model version uuid } + } + metrics { + cpu { percentTotal } + memory { total used free percentTotal } + } + array { + state + capacity { kilobytes { total free used } } + disks { name device temp status fsSize fsFree fsUsed isSpinning numErrors } + caches { name device temp status fsSize fsFree fsUsed fsType type } + parityCheckStatus { status progress errors } + } + disks { id name device size status temp numErrors } + shares { name comment free } + docker { + containers { names image state status } + } + vms { domains { id name state } } + vars { timeZone regTy regTo } + notifications { id title subject description importance timestamp } + recentLog: logFile(path: \"syslog\", lines: 50) { content } + online + isSSOEnabled + }' + + RESPONSE=$("$QUERY_SCRIPT" -q "$QUERY" -f json) + + # Debug output + echo "$RESPONSE" > "${NAME}_debug.json" + + # Check if response is valid JSON + if ! echo "$RESPONSE" | jq -e . >/dev/null 2>&1; then + echo "Error querying $NAME: Invalid response" + echo "Response saved to ${NAME}_debug.json" + echo "## Server: $NAME (⚠️ Error)" >> "$OUTPUT_FILE" + echo "Failed to retrieve data." 
>> "$OUTPUT_FILE" + return + fi + + # Append to report + echo "## Server: $NAME" >> "$OUTPUT_FILE" + + # System Info + CPU_MODEL=$(echo "$RESPONSE" | jq -r '.data.info.cpu.model') + CPU_CORES=$(echo "$RESPONSE" | jq -r '.data.info.cpu.cores') + CPU_THREADS=$(echo "$RESPONSE" | jq -r '.data.info.cpu.threads') + OS_REL=$(echo "$RESPONSE" | jq -r '.data.info.os.release') + OS_ARCH=$(echo "$RESPONSE" | jq -r '.data.info.os.arch // "x64"') + SYS_MFG=$(echo "$RESPONSE" | jq -r '.data.info.system.manufacturer // "Unknown"') + SYS_MODEL=$(echo "$RESPONSE" | jq -r '.data.info.system.model // "Unknown"') + TIMEZONE=$(echo "$RESPONSE" | jq -r '.data.vars.timeZone // "N/A"') + LICENSE=$(echo "$RESPONSE" | jq -r '.data.vars.regTy // "Unknown"') + REG_TO=$(echo "$RESPONSE" | jq -r '.data.vars.regTo // "N/A"') + CPU_LOAD=$(echo "$RESPONSE" | jq -r '.data.metrics.cpu.percentTotal // 0') + TOTAL_MEM=$(echo "$RESPONSE" | jq -r '.data.metrics.memory.total // 0') + MEM_USED_PCT=$(echo "$RESPONSE" | jq -r '.data.metrics.memory.percentTotal // 0') + TOTAL_MEM_GB=$((TOTAL_MEM / 1024 / 1024 / 1024)) + + echo "### System" >> "$OUTPUT_FILE" + echo "- **Hardware:** $SYS_MFG $SYS_MODEL" >> "$OUTPUT_FILE" + echo "- **OS:** Unraid $OS_REL ($OS_ARCH)" >> "$OUTPUT_FILE" + echo "- **License:** $LICENSE (Registered to: $REG_TO)" >> "$OUTPUT_FILE" + echo "- **Timezone:** $TIMEZONE" >> "$OUTPUT_FILE" + echo "- **CPU:** Model $CPU_MODEL ($CPU_CORES cores / $CPU_THREADS threads) - **${CPU_LOAD}% load**" >> "$OUTPUT_FILE" + echo "- **Memory:** ${TOTAL_MEM_GB}GB - **${MEM_USED_PCT}% used**" >> "$OUTPUT_FILE" + echo "" >> "$OUTPUT_FILE" + + # Array capacity + ARRAY_TOTAL=$(echo "$RESPONSE" | jq -r '.data.array.capacity.kilobytes.total') + ARRAY_FREE=$(echo "$RESPONSE" | jq -r '.data.array.capacity.kilobytes.free') + ARRAY_USED=$(echo "$RESPONSE" | jq -r '.data.array.capacity.kilobytes.used') + + if [ "$ARRAY_TOTAL" != "null" ] && [ "$ARRAY_TOTAL" -gt 0 ]; then + ARRAY_TOTAL_GB=$((ARRAY_TOTAL / 1024 / 
1024)) + ARRAY_FREE_GB=$((ARRAY_FREE / 1024 / 1024)) + ARRAY_USED_GB=$((ARRAY_USED / 1024 / 1024)) + ARRAY_USED_PCT=$((ARRAY_USED * 100 / ARRAY_TOTAL)) + echo "### Storage" >> "$OUTPUT_FILE" + echo "- **Array:** ${ARRAY_USED_GB}GB / ${ARRAY_TOTAL_GB}GB used (${ARRAY_USED_PCT}%)" >> "$OUTPUT_FILE" + fi + + # Cache pools + echo "- **Cache Pools:**" >> "$OUTPUT_FILE" + echo "$RESPONSE" | jq -r '.data.array.caches[] | " - \(.name) (\(.device)): \(.temp)°C - \(.status) - \(if .fsSize then "\((.fsUsed / 1024 / 1024 | floor))GB / \((.fsSize / 1024 / 1024 | floor))GB used" else "N/A" end)"' >> "$OUTPUT_FILE" + + # Docker + TOTAL_CONTAINERS=$(echo "$RESPONSE" | jq '[.data.docker.containers[]] | length') + RUNNING_CONTAINERS=$(echo "$RESPONSE" | jq '[.data.docker.containers[] | select(.state == "RUNNING")] | length') + + echo "" >> "$OUTPUT_FILE" + echo "### Workloads" >> "$OUTPUT_FILE" + echo "- **Docker:** ${TOTAL_CONTAINERS} containers (${RUNNING_CONTAINERS} running)" >> "$OUTPUT_FILE" + + # Unhealthy containers + UNHEALTHY=$(echo "$RESPONSE" | jq -r '.data.docker.containers[] | select(.status | test("unhealthy|restarting"; "i")) | " - ⚠️ \(.names[0]): \(.status)"') + if [ -n "$UNHEALTHY" ]; then + echo "$UNHEALTHY" >> "$OUTPUT_FILE" + fi + + # VMs + if [ "$(echo "$RESPONSE" | jq -r '.data.vms.domains')" != "null" ]; then + TOTAL_VMS=$(echo "$RESPONSE" | jq '[.data.vms.domains[]] | length') + RUNNING_VMS=$(echo "$RESPONSE" | jq '[.data.vms.domains[] | select(.state == "RUNNING")] | length') + echo "- **VMs:** ${TOTAL_VMS} VMs (${RUNNING_VMS} running)" >> "$OUTPUT_FILE" + else + echo "- **VMs:** Service disabled or no data" >> "$OUTPUT_FILE" + fi + + # Disk Health + echo "" >> "$OUTPUT_FILE" + echo "### Health" >> "$OUTPUT_FILE" + + HOT_DISKS=$(echo "$RESPONSE" | jq -r '.data.array.disks[] | select(.temp > 45) | "- ⚠️ \(.name): \(.temp)°C (HIGH)"') + DISK_ERRORS=$(echo "$RESPONSE" | jq -r '.data.array.disks[] | select(.numErrors > 0) | "- ❌ \(.name): \(.numErrors) 
errors"') + + if [ -z "$HOT_DISKS" ] && [ -z "$DISK_ERRORS" ]; then + echo "- ✅ All disks healthy" >> "$OUTPUT_FILE" + else + [ -n "$HOT_DISKS" ] && echo "$HOT_DISKS" >> "$OUTPUT_FILE" + [ -n "$DISK_ERRORS" ] && echo "$DISK_ERRORS" >> "$OUTPUT_FILE" + fi + + # Notifications (Alerts) + echo "" >> "$OUTPUT_FILE" + echo "### Notifications" >> "$OUTPUT_FILE" + + NOTIF_COUNT=$(echo "$RESPONSE" | jq '[.data.notifications[]] | length' 2>/dev/null || echo "0") + if [ "$NOTIF_COUNT" -gt 0 ] && [ "$NOTIF_COUNT" != "null" ]; then + # Show recent notifications (last 10) + ALERT_NOTIFS=$(echo "$RESPONSE" | jq -r '.data.notifications | sort_by(.timestamp) | reverse | .[0:10][] | "- [\(.importance // "info")] \(.title // .subject): \(.description // "No description") (\(.timestamp | split("T")[0]))"' 2>/dev/null) + if [ -n "$ALERT_NOTIFS" ]; then + echo "$ALERT_NOTIFS" >> "$OUTPUT_FILE" + else + echo "- ✅ No recent notifications" >> "$OUTPUT_FILE" + fi + + # Count by importance + ALERT_COUNT=$(echo "$RESPONSE" | jq '[.data.notifications[] | select(.importance == "alert" or .importance == "warning")] | length' 2>/dev/null || echo "0") + if [ "$ALERT_COUNT" -gt 0 ]; then + echo "" >> "$OUTPUT_FILE" + echo "**⚠️ $ALERT_COUNT alert/warning notifications**" >> "$OUTPUT_FILE" + fi + else + echo "- ✅ No notifications" >> "$OUTPUT_FILE" + fi + + echo "" >> "$OUTPUT_FILE" + echo "---" >> "$OUTPUT_FILE" + echo "" >> "$OUTPUT_FILE" +} + +# Main loop - process each server from environment variables +for server in "TOOTIE" "SHART"; do + name_var="UNRAID_${server}_NAME" + url_var="UNRAID_${server}_URL" + key_var="UNRAID_${server}_API_KEY" + + NAME="${!name_var}" + URL="${!url_var}" + KEY="${!key_var}" + + process_server "$NAME" "$URL" "$KEY" +done + +echo "Dashboard saved to: $OUTPUT_FILE" +cat "$OUTPUT_FILE" diff --git a/skills/unraid/scripts/unraid-query.sh b/skills/unraid/scripts/unraid-query.sh new file mode 100755 index 0000000..4a2eee0 --- /dev/null +++ 
b/skills/unraid/scripts/unraid-query.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# Unraid GraphQL API Query Helper +# Makes it easy to query the Unraid API from the command line + +set -e + +# Usage function +usage() { + cat << EOF +Usage: $0 [OPTIONS] + +Query the Unraid GraphQL API + +OPTIONS: + -u, --url URL Unraid server URL (required) + -k, --key KEY API key (required) + -q, --query QUERY GraphQL query (required) + -f, --format FORMAT Output format: json (default), raw, pretty + -h, --help Show this help message + +ENVIRONMENT VARIABLES: + UNRAID_URL Default Unraid server URL + UNRAID_API_KEY Default API key + +EXAMPLES: + # Get system status + $0 -u https://unraid.local/graphql -k YOUR_KEY -q "{ online }" + + # Use environment variables + export UNRAID_URL="https://unraid.local/graphql" + export UNRAID_API_KEY="your-api-key" + $0 -q "{ metrics { cpu { percentTotal } } }" + + # Pretty print output + $0 -q "{ array { state } }" -f pretty + +EOF + exit 1 +} + +# Default values +URL="${UNRAID_URL:-}" +API_KEY="${UNRAID_API_KEY:-}" +QUERY="" +FORMAT="json" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + -u|--url) + URL="$2" + shift 2 + ;; + -k|--key) + API_KEY="$2" + shift 2 + ;; + -q|--query) + QUERY="$2" + shift 2 + ;; + -f|--format) + FORMAT="$2" + shift 2 + ;; + -h|--help) + usage + ;; + *) + echo "Unknown option: $1" + usage + ;; + esac +done + +# Validate required arguments +if [[ -z "$URL" ]]; then + echo "Error: Unraid URL is required (use -u or set UNRAID_URL)" + exit 1 +fi + +if [[ -z "$API_KEY" ]]; then + echo "Error: API key is required (use -k or set UNRAID_API_KEY)" + exit 1 +fi + +if [[ -z "$QUERY" ]]; then + echo "Error: GraphQL query is required (use -q)" + exit 1 +fi + +# Make the request +RESPONSE=$(curl -skL -X POST "$URL" \ + -H "Content-Type: application/json" \ + -H "x-api-key: $API_KEY" \ + -d "{\"query\":\"$QUERY\"}") + +# Check for errors +if echo "$RESPONSE" | jq -e '.errors' > /dev/null 2>&1; then + # If we have data despite 
errors, and --ignore-errors is set, continue + if [[ "$IGNORE_ERRORS" == "true" ]] && echo "$RESPONSE" | jq -e '.data' > /dev/null 2>&1; then + echo "GraphQL Warning:" >&2 + echo "$RESPONSE" | jq -r '.errors[0].message' >&2 + else + echo "GraphQL Error:" >&2 + echo "$RESPONSE" | jq -r '.errors[0].message' >&2 + exit 1 + fi +fi + +# Output based on format +case "$FORMAT" in + json) + echo "$RESPONSE" + ;; + raw) + echo "$RESPONSE" | jq -r '.data' + ;; + pretty) + echo "$RESPONSE" | jq '.' + ;; + *) + echo "Unknown format: $FORMAT" >&2 + exit 1 + ;; +esac diff --git a/skills/unraid/setup.sh b/skills/unraid/setup.sh new file mode 100755 index 0000000..ab8aa03 --- /dev/null +++ b/skills/unraid/setup.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Setup script for Unraid MCP Plugin +# Installs the MCP server dependencies + +set -euo pipefail + +PLUGIN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$PLUGIN_ROOT/../.." && pwd)" + +echo "=== Unraid MCP Plugin Setup ===" +echo "" +echo "Plugin root: $PLUGIN_ROOT" +echo "Project root: $PROJECT_ROOT" +echo "" + +# Check if uv is installed +if ! command -v uv &> /dev/null; then + echo "Error: 'uv' is not installed." + echo "Install it with: curl -LsSf https://astral.sh/uv/install.sh | sh" + exit 1 +fi + +echo "✓ uv is installed" + +# Navigate to project root and install dependencies +cd "$PROJECT_ROOT" + +echo "Installing Python dependencies..." +uv sync + +echo "" +echo "✓ Setup complete!" 
+echo "" +echo "Configure your Unraid server by setting these environment variables:" +echo " export UNRAID_API_URL='http://your-unraid-server/graphql'" +echo " export UNRAID_API_KEY='your-api-key'" +echo "" +echo "Test the MCP server with:" +echo " uv run unraid-mcp-server" diff --git a/tests/test_info.py b/tests/test_info.py index e445678..cb5a759 100644 --- a/tests/test_info.py +++ b/tests/test_info.py @@ -52,6 +52,20 @@ class TestAnalyzeDiskHealth: disks = [{"status": "DISK_OK", "warning": 45}] result = _analyze_disk_health(disks) assert result["warning"] == 1 + assert result["critical"] == 0 + + def test_counts_critical_disks(self) -> None: + disks = [{"status": "DISK_OK", "critical": 55}] + result = _analyze_disk_health(disks) + assert result["critical"] == 1 + assert result["warning"] == 0 + assert result["healthy"] == 0 + + def test_critical_takes_precedence_over_warning(self) -> None: + disks = [{"status": "DISK_OK", "warning": 45, "critical": 55}] + result = _analyze_disk_health(disks) + assert result["critical"] == 1 + assert result["warning"] == 0 def test_counts_missing_disks(self) -> None: disks = [{"status": "DISK_NP"}] @@ -76,6 +90,16 @@ class TestProcessArrayStatus: assert result["summary"]["state"] == "STARTED" assert result["summary"]["overall_health"] == "HEALTHY" + def test_critical_disk_threshold_array(self) -> None: + raw = { + "state": "STARTED", + "parities": [], + "disks": [{"status": "DISK_OK", "critical": 55}], + "caches": [], + } + result = _process_array_status(raw) + assert result["summary"]["overall_health"] == "CRITICAL" + def test_degraded_array(self) -> None: raw = { "state": "STARTED", diff --git a/tests/test_storage.py b/tests/test_storage.py index b638251..2ab656b 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -60,6 +60,15 @@ class TestStorageValidation: with pytest.raises(ToolError, match="log_path must start with"): await tool_fn(action="logs", log_path="/etc/shadow") + async def 
test_logs_rejects_path_traversal(self, _mock_graphql: AsyncMock) -> None: + tool_fn = _make_tool() + # Traversal that escapes /var/log/ to reach /etc/shadow + with pytest.raises(ToolError, match="log_path must start with"): + await tool_fn(action="logs", log_path="/var/log/../../etc/shadow") + # Traversal that escapes /mnt/ to reach /etc/passwd + with pytest.raises(ToolError, match="log_path must start with"): + await tool_fn(action="logs", log_path="/mnt/../etc/passwd") + async def test_logs_allows_valid_paths(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}} tool_fn = _make_tool() diff --git a/tests/test_users.py b/tests/test_users.py index 901c6ca..fc61667 100644 --- a/tests/test_users.py +++ b/tests/test_users.py @@ -42,7 +42,7 @@ class TestUsersValidation: class TestUsersActions: async def test_me(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"me": {"id": "u:1", "name": "root", "role": "ADMIN"}} + _mock_graphql.return_value = {"me": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}} tool_fn = _make_tool() result = await tool_fn(action="me") assert result["name"] == "root" @@ -56,19 +56,19 @@ class TestUsersActions: assert len(result["users"]) == 2 async def test_get(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"user": {"id": "u:1", "name": "root", "role": "ADMIN"}} + _mock_graphql.return_value = {"user": {"id": "u:1", "name": "root", "description": "", "roles": ["ADMIN"]}} tool_fn = _make_tool() result = await tool_fn(action="get", user_id="u:1") assert result["name"] == "root" async def test_add(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "newuser", "role": "USER"}} + _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "newuser", "description": "", "roles": ["USER"]}} tool_fn = _make_tool() result = await tool_fn(action="add", 
name="newuser", password="pass123") assert result["success"] is True async def test_add_with_role(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "admin2", "role": "ADMIN"}} + _mock_graphql.return_value = {"addUser": {"id": "u:3", "name": "admin2", "description": "", "roles": ["ADMIN"]}} tool_fn = _make_tool() result = await tool_fn(action="add", name="admin2", password="pass123", role="admin") assert result["success"] is True @@ -76,10 +76,12 @@ class TestUsersActions: assert call_args[0][1]["input"]["role"] == "ADMIN" async def test_delete(self, _mock_graphql: AsyncMock) -> None: - _mock_graphql.return_value = {"deleteUser": True} + _mock_graphql.return_value = {"deleteUser": {"id": "u:2", "name": "guest"}} tool_fn = _make_tool() result = await tool_fn(action="delete", user_id="u:2", confirm=True) assert result["success"] is True + call_args = _mock_graphql.call_args + assert call_args[0][1]["input"]["id"] == "u:2" async def test_cloud(self, _mock_graphql: AsyncMock) -> None: _mock_graphql.return_value = {"cloud": {"status": "connected", "apiKey": "***"}} @@ -98,3 +100,31 @@ class TestUsersActions: tool_fn = _make_tool() result = await tool_fn(action="origins") assert len(result["origins"]) == 2 + + +class TestUsersNoneHandling: + """Verify actions return empty dict (not TypeError) when API returns None.""" + + async def test_me_returns_none(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"me": None} + tool_fn = _make_tool() + result = await tool_fn(action="me") + assert result == {} + + async def test_get_returns_none(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"user": None} + tool_fn = _make_tool() + result = await tool_fn(action="get", user_id="u:1") + assert result == {} + + async def test_cloud_returns_none(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"cloud": None} + tool_fn = _make_tool() + result = await 
tool_fn(action="cloud") + assert result == {} + + async def test_remote_access_returns_none(self, _mock_graphql: AsyncMock) -> None: + _mock_graphql.return_value = {"remoteAccess": None} + tool_fn = _make_tool() + result = await tool_fn(action="remote_access") + assert result == {} diff --git a/unraid_mcp/subscriptions/manager.py b/unraid_mcp/subscriptions/manager.py index 4825d9a..8044ef1 100644 --- a/unraid_mcp/subscriptions/manager.py +++ b/unraid_mcp/subscriptions/manager.py @@ -8,6 +8,7 @@ error handling, reconnection logic, and authentication. import asyncio import json import os +import ssl from datetime import datetime from typing import Any @@ -153,6 +154,16 @@ class SubscriptionManager: logger.debug(f"[WEBSOCKET:{subscription_name}] Connecting to: {ws_url}") logger.debug(f"[WEBSOCKET:{subscription_name}] API Key present: {'Yes' if UNRAID_API_KEY else 'No'}") + # Build SSL context for wss:// connections + ssl_context = None + if ws_url.startswith('wss://'): + if isinstance(UNRAID_VERIFY_SSL, str): + ssl_context = ssl.create_default_context(cafile=UNRAID_VERIFY_SSL) + elif UNRAID_VERIFY_SSL: + ssl_context = ssl.create_default_context() + else: + ssl_context = ssl._create_unverified_context() + # Connection with timeout connect_timeout = 10 logger.debug(f"[WEBSOCKET:{subscription_name}] Connection timeout: {connect_timeout}s") @@ -163,7 +174,7 @@ class SubscriptionManager: ping_interval=20, ping_timeout=10, close_timeout=10, - ssl=UNRAID_VERIFY_SSL + ssl=ssl_context ) as websocket: selected_proto = websocket.subprotocol or "none" diff --git a/unraid_mcp/tools/docker.py b/unraid_mcp/tools/docker.py index 9a3b2e2..e82f841 100644 --- a/unraid_mcp/tools/docker.py +++ b/unraid_mcp/tools/docker.py @@ -308,7 +308,13 @@ def register_docker_tool(mcp: FastMCP) -> None: } docker_data = data.get("docker", {}) - result = docker_data.get(action, docker_data.get("removeContainer")) + # Map action names to GraphQL response field names where they differ + response_field_map 
= { + "update": "updateContainer", + "remove": "removeContainer", + } + field = response_field_map.get(action, action) + result = docker_data.get(field) return { "success": True, "action": action, diff --git a/unraid_mcp/tools/info.py b/unraid_mcp/tools/info.py index b5802d8..91d2d0a 100644 --- a/unraid_mcp/tools/info.py +++ b/unraid_mcp/tools/info.py @@ -204,13 +204,18 @@ def _process_system_info(raw_info: dict[str, Any]) -> dict[str, Any]: def _analyze_disk_health(disks: list[dict[str, Any]]) -> dict[str, int]: """Analyze health status of disk arrays.""" - counts = {"healthy": 0, "failed": 0, "missing": 0, "new": 0, "warning": 0, "unknown": 0} + counts = {"healthy": 0, "failed": 0, "missing": 0, "new": 0, "warning": 0, "critical": 0, "unknown": 0} for disk in disks: status = disk.get("status", "").upper() warning = disk.get("warning") critical = disk.get("critical") if status == "DISK_OK": - counts["warning" if (warning or critical) else "healthy"] += 1 + if critical: + counts["critical"] += 1 + elif warning: + counts["warning"] += 1 + else: + counts["healthy"] += 1 elif status in ("DISK_DSBL", "DISK_INVALID"): counts["failed"] += 1 elif status == "DISK_NP": @@ -254,10 +259,11 @@ def _process_array_status(raw: dict[str, Any]) -> dict[str, Any]: health_summary[label] = _analyze_disk_health(raw[key]) total_failed = sum(h.get("failed", 0) for h in health_summary.values()) + total_critical = sum(h.get("critical", 0) for h in health_summary.values()) total_missing = sum(h.get("missing", 0) for h in health_summary.values()) total_warning = sum(h.get("warning", 0) for h in health_summary.values()) - if total_failed > 0: + if total_failed > 0 or total_critical > 0: overall = "CRITICAL" elif total_missing > 0: overall = "DEGRADED" diff --git a/unraid_mcp/tools/keys.py b/unraid_mcp/tools/keys.py index a3812d3..df88dc9 100644 --- a/unraid_mcp/tools/keys.py +++ b/unraid_mcp/tools/keys.py @@ -111,7 +111,7 @@ def register_keys_tool(mcp: FastMCP) -> None: if action == "update": 
if not key_id: raise ToolError("key_id is required for 'update' action") - input_data = {"id": key_id} + input_data: dict[str, Any] = {"id": key_id} if name: input_data["name"] = name if roles: @@ -130,6 +130,9 @@ def register_keys_tool(mcp: FastMCP) -> None: data = await make_graphql_request( MUTATIONS["delete"], {"input": {"ids": [key_id]}} ) + result = data.get("deleteApiKeys") + if not result: + raise ToolError(f"Failed to delete API key '{key_id}': no confirmation from server") return { "success": True, "message": f"API key '{key_id}' deleted", diff --git a/unraid_mcp/tools/rclone.py b/unraid_mcp/tools/rclone.py index 197b8a2..3a8c322 100644 --- a/unraid_mcp/tools/rclone.py +++ b/unraid_mcp/tools/rclone.py @@ -100,7 +100,9 @@ def register_rclone_tool(mcp: FastMCP) -> None: MUTATIONS["create_remote"], {"input": {"name": name, "type": provider_type, "config": config_data}}, ) - remote = data.get("rclone", {}).get("createRCloneRemote", {}) + remote = data.get("rclone", {}).get("createRCloneRemote") + if not remote: + raise ToolError(f"Failed to create remote '{name}': no confirmation from server") return { "success": True, "message": f"Remote '{name}' created successfully", diff --git a/unraid_mcp/tools/storage.py b/unraid_mcp/tools/storage.py index 886dafd..b614edd 100644 --- a/unraid_mcp/tools/storage.py +++ b/unraid_mcp/tools/storage.py @@ -4,6 +4,7 @@ Provides the `unraid_storage` tool with 6 actions for shares, physical disks, unassigned devices, log files, and log content retrieval. """ +import posixpath from typing import Any, Literal from fastmcp import FastMCP @@ -99,11 +100,14 @@ def register_storage_tool(mcp: FastMCP) -> None: if not log_path: raise ToolError("log_path is required for 'logs' action") _ALLOWED_LOG_PREFIXES = ("/var/log/", "/boot/logs/", "/mnt/") - if not any(log_path.startswith(p) for p in _ALLOWED_LOG_PREFIXES): + # Normalize path to prevent traversal attacks (e.g. 
/var/log/../../etc/shadow) + normalized = posixpath.normpath(log_path) + if not any(normalized.startswith(p) for p in _ALLOWED_LOG_PREFIXES): raise ToolError( f"log_path must start with one of: {', '.join(_ALLOWED_LOG_PREFIXES)}. " f"Use log_files action to discover valid paths." ) + log_path = normalized query = QUERIES[action] variables: dict[str, Any] | None = None diff --git a/unraid_mcp/tools/users.py b/unraid_mcp/tools/users.py index ee29aa5..a199be5 100644 --- a/unraid_mcp/tools/users.py +++ b/unraid_mcp/tools/users.py @@ -15,17 +15,17 @@ from ..core.exceptions import ToolError QUERIES: dict[str, str] = { "me": """ query GetMe { - me { id name role email } + me { id name description roles } } """, "list": """ query ListUsers { - users { id name role email } + users { id name description roles } } """, "get": """ - query GetUser($id: PrefixedID!) { - user(id: $id) { id name role email } + query GetUser($id: ID!) { + user(id: $id) { id name description roles } } """, "cloud": """ @@ -47,13 +47,13 @@ QUERIES: dict[str, str] = { MUTATIONS: dict[str, str] = { "add": """ - mutation AddUser($input: AddUserInput!) { - addUser(input: $input) { id name role } + mutation AddUser($input: addUserInput!) { + addUser(input: $input) { id name description roles } } """, "delete": """ - mutation DeleteUser($id: PrefixedID!) { - deleteUser(id: $id) + mutation DeleteUser($input: deleteUserInput!) 
{ + deleteUser(input: $input) { id name } } """, } @@ -101,7 +101,7 @@ def register_users_tool(mcp: FastMCP) -> None: if action == "me": data = await make_graphql_request(QUERIES["me"]) - return dict(data.get("me", {})) + return data.get("me") or {} if action == "list": data = await make_graphql_request(QUERIES["list"]) @@ -112,7 +112,7 @@ def register_users_tool(mcp: FastMCP) -> None: if not user_id: raise ToolError("user_id is required for 'get' action") data = await make_graphql_request(QUERIES["get"], {"id": user_id}) - return dict(data.get("user", {})) + return data.get("user") or {} if action == "add": if not name or not password: @@ -132,7 +132,7 @@ def register_users_tool(mcp: FastMCP) -> None: if not user_id: raise ToolError("user_id is required for 'delete' action") data = await make_graphql_request( - MUTATIONS["delete"], {"id": user_id} + MUTATIONS["delete"], {"input": {"id": user_id}} ) return { "success": True, @@ -141,11 +141,11 @@ def register_users_tool(mcp: FastMCP) -> None: if action == "cloud": data = await make_graphql_request(QUERIES["cloud"]) - return dict(data.get("cloud", {})) + return data.get("cloud") or {} if action == "remote_access": data = await make_graphql_request(QUERIES["remote_access"]) - return dict(data.get("remoteAccess", {})) + return data.get("remoteAccess") or {} if action == "origins": data = await make_graphql_request(QUERIES["origins"]) diff --git a/unraid_mcp/tools/virtualization.py b/unraid_mcp/tools/virtualization.py index 42271a3..16476f2 100644 --- a/unraid_mcp/tools/virtualization.py +++ b/unraid_mcp/tools/virtualization.py @@ -105,15 +105,16 @@ def register_vm_tool(mcp: FastMCP) -> None: if action == "list": data = await make_graphql_request(QUERIES["list"]) - if data.get("vms") and data["vms"].get("domains"): - vms = data["vms"]["domains"] - return {"vms": list(vms) if isinstance(vms, list) else []} + if data.get("vms"): + vms = data["vms"].get("domains") or data["vms"].get("domain") + if vms: + return {"vms": 
list(vms) if isinstance(vms, list) else []} return {"vms": []} if action == "details": data = await make_graphql_request(QUERIES["details"]) if data.get("vms"): - vms = data["vms"].get("domains") or [] + vms = data["vms"].get("domains") or data["vms"].get("domain") or [] for vm in vms: if ( vm.get("uuid") == vm_id diff --git a/uv.lock b/uv.lock index 65df554..d4c35e0 100644 --- a/uv.lock +++ b/uv.lock @@ -1985,7 +1985,7 @@ requires-dist = [ { name = "black", marker = "extra == 'dev'", specifier = ">=25.1.0" }, { name = "black", marker = "extra == 'lint'", specifier = ">=25.1.0" }, { name = "build", marker = "extra == 'dev'", specifier = ">=1.2.2" }, - { name = "fastapi", specifier = ">=0.116.1" }, + { name = "fastapi", specifier = ">=0.115.0" }, { name = "fastmcp", specifier = ">=2.11.2" }, { name = "httpx", specifier = ">=0.28.1" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.4.2" },