forked from HomeLab/unraid-mcp
Comprehensive fixes across Python code, shell scripts, and documentation addressing all remaining MEDIUM and LOW priority review comments. Python Code Fixes (27 fixes): - tools/info.py: Simplified dispatch with lookup tables, defensive guards, CPU fallback formatting, !s conversion flags, module-level sync assertion - tools/docker.py: Case-insensitive container ID regex, keyword-only confirm, module-level ALL_ACTIONS constant - tools/virtualization.py: Normalized single-VM dict responses, unified list/details queries - core/client.py: Fixed HTTP client singleton race condition, compound key substring matching for sensitive data redaction - subscriptions/: Extracted SSL context creation to shared helper in utils.py, replaced deprecated ssl._create_unverified_context API - tools/array.py: Renamed parity_history to parity_status, hoisted ALL_ACTIONS - tools/storage.py: Fixed dict(None) risks, temperature 0 falsiness bug - tools/notifications.py, keys.py, rclone.py: Fixed dict(None) TypeError risks - tests/: Fixed generator type annotations, added coverage for compound keys Shell Script Fixes (13 fixes): - dashboard.sh: Dynamic server discovery, conditional debug output, null-safe jq, notification count guard order, removed unused variables - unraid-query.sh: Proper JSON escaping via jq, --ignore-errors and --insecure CLI flags, TLS verification now on by default - validate-marketplace.sh: Removed unused YELLOW variable, defensive jq, simplified repository URL output Documentation Fixes (24+ fixes): - Version consistency: Updated all references to v0.2.0 across pyproject.toml, plugin.json, marketplace.json, MARKETPLACE.md, __init__.py, README files - Tool count updates: Changed all "26 tools" references to "10 tools, 90 actions" - Markdown lint: Fixed MD022, MD031, MD047 issues across multiple files - Research docs: Fixed auth headers, removed web artifacts, corrected stale info - Skills docs: Fixed query examples, endpoint counts, env var references All 227 tests 
pass, ruff and ty checks clean.
182 lines
7.8 KiB
Python
182 lines
7.8 KiB
Python
"""Tests for unraid_vm tool."""
|
|
|
|
from collections.abc import Generator
|
|
from unittest.mock import AsyncMock, patch
|
|
|
|
import pytest
|
|
from conftest import make_tool_fn
|
|
|
|
from unraid_mcp.core.exceptions import ToolError
|
|
|
|
|
|
@pytest.fixture
def _mock_graphql() -> Generator[AsyncMock, None, None]:
    """Replace the virtualization module's GraphQL request with an AsyncMock.

    Yields the mock so each test can set ``return_value`` or ``side_effect``
    before invoking the tool; the patch is undone when the test finishes.
    """
    target = "unraid_mcp.tools.virtualization.make_graphql_request"
    with patch(target, new_callable=AsyncMock) as mock_request:
        yield mock_request
|
|
|
|
|
|
def _make_tool():
    """Build and return the registered ``unraid_vm`` tool callable."""
    tool_fn = make_tool_fn("unraid_mcp.tools.virtualization", "register_vm_tool", "unraid_vm")
    return tool_fn
|
|
|
|
|
|
class TestVmValidation:
    """Argument-validation behaviour of the unraid_vm tool."""

    async def test_actions_except_list_require_vm_id(self, _mock_graphql: AsyncMock) -> None:
        """Every per-VM action must raise when vm_id is omitted."""
        tool = _make_tool()
        per_vm_actions = ("details", "start", "stop", "pause", "resume", "reboot")
        for act in per_vm_actions:
            with pytest.raises(ToolError, match="vm_id"):
                await tool(action=act)

    async def test_destructive_actions_require_confirm(self, _mock_graphql: AsyncMock) -> None:
        """force_stop and reset must refuse to run without confirmation."""
        tool = _make_tool()
        for act in ("force_stop", "reset"):
            with pytest.raises(ToolError, match="destructive"):
                await tool(action=act, vm_id="uuid-1")

    async def test_destructive_vm_id_check_before_confirm(self, _mock_graphql: AsyncMock) -> None:
        """Destructive actions without vm_id should fail on vm_id first (validated before confirm)."""
        tool = _make_tool()
        with pytest.raises(ToolError, match="vm_id"):
            await tool(action="force_stop")
|
|
|
|
|
|
class TestVmActions:
    """Happy-path behaviour for list, details, and lifecycle actions."""

    async def test_list(self, _mock_graphql: AsyncMock) -> None:
        """list returns the domains array under a 'vms' key."""
        domain = {"id": "vm:1", "name": "Windows 11", "state": "RUNNING", "uuid": "uuid-1"}
        _mock_graphql.return_value = {"vms": {"domains": [domain]}}
        tool = _make_tool()
        result = await tool(action="list")
        assert len(result["vms"]) == 1
        assert result["vms"][0]["name"] == "Windows 11"

    async def test_list_empty(self, _mock_graphql: AsyncMock) -> None:
        """An empty domains array yields an empty vms list."""
        _mock_graphql.return_value = {"vms": {"domains": []}}
        tool = _make_tool()
        result = await tool(action="list")
        assert result["vms"] == []

    async def test_list_no_vms_key(self, _mock_graphql: AsyncMock) -> None:
        """A response missing the 'vms' key still yields an empty list."""
        _mock_graphql.return_value = {}
        tool = _make_tool()
        result = await tool(action="list")
        assert result["vms"] == []

    async def test_details_by_uuid(self, _mock_graphql: AsyncMock) -> None:
        """details resolves a VM by its uuid."""
        domain = {"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}
        _mock_graphql.return_value = {"vms": {"domains": [domain]}}
        tool = _make_tool()
        result = await tool(action="details", vm_id="uuid-1")
        assert result["name"] == "Win11"

    async def test_details_by_name(self, _mock_graphql: AsyncMock) -> None:
        """details also resolves a VM by its display name."""
        domain = {"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}
        _mock_graphql.return_value = {"vms": {"domains": [domain]}}
        tool = _make_tool()
        result = await tool(action="details", vm_id="Win11")
        assert result["uuid"] == "uuid-1"

    async def test_details_not_found(self, _mock_graphql: AsyncMock) -> None:
        """details raises ToolError when no VM matches the identifier."""
        domain = {"id": "vm:1", "name": "Win11", "state": "RUNNING", "uuid": "uuid-1"}
        _mock_graphql.return_value = {"vms": {"domains": [domain]}}
        tool = _make_tool()
        with pytest.raises(ToolError, match="not found"):
            await tool(action="details", vm_id="nonexistent")

    async def test_start_vm(self, _mock_graphql: AsyncMock) -> None:
        """start reports success and echoes the action name."""
        _mock_graphql.return_value = {"vm": {"start": True}}
        tool = _make_tool()
        result = await tool(action="start", vm_id="uuid-1")
        assert result["success"] is True
        assert result["action"] == "start"

    async def test_force_stop(self, _mock_graphql: AsyncMock) -> None:
        """force_stop succeeds when confirmed and the mutation returns True."""
        _mock_graphql.return_value = {"vm": {"forceStop": True}}
        tool = _make_tool()
        result = await tool(action="force_stop", vm_id="uuid-1", confirm=True)
        assert result["success"] is True
        assert result["action"] == "force_stop"

    async def test_stop_vm(self, _mock_graphql: AsyncMock) -> None:
        """stop reports success and echoes the action name."""
        _mock_graphql.return_value = {"vm": {"stop": True}}
        tool = _make_tool()
        result = await tool(action="stop", vm_id="uuid-1")
        assert result["success"] is True
        assert result["action"] == "stop"

    async def test_pause_vm(self, _mock_graphql: AsyncMock) -> None:
        """pause reports success and echoes the action name."""
        _mock_graphql.return_value = {"vm": {"pause": True}}
        tool = _make_tool()
        result = await tool(action="pause", vm_id="uuid-1")
        assert result["success"] is True
        assert result["action"] == "pause"

    async def test_resume_vm(self, _mock_graphql: AsyncMock) -> None:
        """resume reports success and echoes the action name."""
        _mock_graphql.return_value = {"vm": {"resume": True}}
        tool = _make_tool()
        result = await tool(action="resume", vm_id="uuid-1")
        assert result["success"] is True
        assert result["action"] == "resume"

    async def test_mutation_unexpected_response(self, _mock_graphql: AsyncMock) -> None:
        """A mutation response missing the expected field raises ToolError."""
        _mock_graphql.return_value = {"vm": {}}
        tool = _make_tool()
        with pytest.raises(ToolError, match="Failed to start"):
            await tool(action="start", vm_id="uuid-1")
|
|
|
|
|
|
class TestVmMutationFailures:
    """Tests for VM mutation responses indicating failure or unexpected shapes."""

    async def test_start_mutation_empty_response(self, _mock_graphql: AsyncMock) -> None:
        """Empty top-level response (no 'vm' key) should raise ToolError."""
        _mock_graphql.return_value = {}
        tool = _make_tool()
        with pytest.raises(ToolError, match="Failed to start"):
            await tool(action="start", vm_id="uuid-1")

    async def test_start_mutation_returns_false(self, _mock_graphql: AsyncMock) -> None:
        """VM start returning False should still succeed (the tool reports the raw value)."""
        _mock_graphql.return_value = {"vm": {"start": False}}
        tool = _make_tool()
        result = await tool(action="start", vm_id="uuid-1")
        assert result["success"] is False
        assert result["action"] == "start"

    async def test_stop_mutation_returns_null(self, _mock_graphql: AsyncMock) -> None:
        """A present-but-None field is treated as a response, not a failure."""
        _mock_graphql.return_value = {"vm": {"stop": None}}
        tool = _make_tool()
        # The tool checks field membership (`field in data["vm"]`), so a key
        # mapped to None still counts as present and the raw value is returned.
        result = await tool(action="stop", vm_id="uuid-1")
        assert result["success"] is None
        assert result["action"] == "stop"

    async def test_force_stop_mutation_empty_vm_object(self, _mock_graphql: AsyncMock) -> None:
        """Empty vm object with no matching field should raise ToolError."""
        _mock_graphql.return_value = {"vm": {}}
        tool = _make_tool()
        with pytest.raises(ToolError, match="Failed to force_stop"):
            await tool(action="force_stop", vm_id="uuid-1", confirm=True)

    async def test_reboot_mutation_vm_key_none(self, _mock_graphql: AsyncMock) -> None:
        """vm key being None should raise ToolError."""
        _mock_graphql.return_value = {"vm": None}
        tool = _make_tool()
        with pytest.raises(ToolError, match="Failed to reboot"):
            await tool(action="reboot", vm_id="uuid-1")

    async def test_mutation_timeout(self, _mock_graphql: AsyncMock) -> None:
        """Mid-operation timeout should be wrapped in ToolError."""
        _mock_graphql.side_effect = TimeoutError("VM operation timed out")
        tool = _make_tool()
        with pytest.raises(ToolError, match="timed out"):
            await tool(action="start", vm_id="uuid-1")
|