Addresses issues found by 4 parallel review agents (code-reviewer,
silent-failure-hunter, type-design-analyzer, pr-test-analyzer).
Source fixes:
- core/utils.py: add public safe_display_url() (moved from tools/health.py)
- core/client.py: rename _redact_sensitive → redact_sensitive (public API)
- core/types.py: add SubscriptionData.__post_init__ for tz-aware datetime
  enforcement (sketched after this list); remove 6 unused type aliases
  (SystemHealth, APIResponse, etc.)
- subscriptions/manager.py: add exc_info=True to both except-Exception blocks;
add except ValueError break-on-config-error before retry loop; import
redact_sensitive by new public name
- subscriptions/resources.py: re-raise in autostart_subscriptions() so
ensure_subscriptions_started() doesn't permanently set _subscriptions_started
- subscriptions/diagnostics.py: except ToolError: raise before broad except;
use safe_display_url() instead of raw URL slice
- tools/health.py: move _safe_display_url to core/utils; add exc_info=True;
raise ToolError (not return dict) on ImportError
- tools/info.py: use get_args(INFO_ACTIONS) instead of INFO_ACTIONS.__args__
- tools/{array,docker,keys,notifications,rclone,storage,virtualization}.py:
  add Literal-vs-ALL_ACTIONS sync check at import time (see sketch below)
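The tz-aware enforcement follows roughly the shape below. This is an
illustrative sketch only: the field names (subscription_id, expires_at) are
placeholders, not necessarily the real attributes on SubscriptionData.

    # Sketch only -- field names are placeholders; the real dataclass lives in core/types.py.
    from dataclasses import dataclass
    from datetime import datetime


    @dataclass
    class SubscriptionData:
        subscription_id: str
        expires_at: datetime

        def __post_init__(self) -> None:
            # Reject naive datetimes so later comparisons never mix naive and aware values.
            if self.expires_at.tzinfo is None:
                raise ValueError("expires_at must be timezone-aware (UTC recommended)")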
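The import-time sync check added to the seven tool modules is, in outline, the
pattern below. The action strings match the ones exercised by the storage tests
in this commit, but the Literal alias name is an assumption and each module's
actual identifiers may differ; it pairs with the tools/info.py change to read
Literal members via get_args().

    # Sketch of the import-time consistency check; identifier names are assumptions.
    from typing import Literal, get_args

    StorageAction = Literal["shares", "disks", "disk_details", "unassigned", "log_files", "logs"]
    ALL_ACTIONS: tuple[str, ...] = (
        "shares", "disks", "disk_details", "unassigned", "log_files", "logs"
    )

    # Fail at import time if the Literal and the runtime tuple drift apart, instead of
    # surfacing the mismatch only when a caller sends the missing action at runtime.
    if set(get_args(StorageAction)) != set(ALL_ACTIONS):
        raise RuntimeError("StorageAction Literal is out of sync with ALL_ACTIONS")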
Test fixes:
- test_health.py: import safe_display_url from core.utils; update
test_diagnose_import_error_internal to expect ToolError (not error dict)
- test_storage.py: add 3 safe_get tests for zero/False/empty-string values
- test_subscription_manager.py: add TestCapLogContentSingleMassiveLine (2 tests)
- test_client.py: rename _redact_sensitive → redact_sensitive; add tests for
  new sensitive keys and is_cacheable explicit-keyword form (sketch below)
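The new test_client.py redaction tests are roughly of the following shape; the
call signature of redact_sensitive and the specific header key used are
assumptions here, so treat this as an illustration of intent rather than the
actual test code.

    # Hypothetical sketch -- assumes redact_sensitive(mapping) returns a copy with values
    # of sensitive keys masked; the real signature in core/client.py may differ.
    from unraid_mcp.core.client import redact_sensitive


    def test_redact_sensitive_masks_api_key() -> None:
        headers = {"x-api-key": "super-secret", "accept": "application/json"}
        redacted = redact_sensitive(headers)
        assert redacted["x-api-key"] != "super-secret"   # sensitive value masked
        assert redacted["accept"] == "application/json"  # non-sensitive value untouched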
test_storage.py · 286 lines · 11 KiB · Python
"""Tests for unraid_storage tool."""
|
|
|
|
from collections.abc import Generator
|
|
from unittest.mock import AsyncMock, patch
|
|
|
|
import pytest
|
|
from conftest import make_tool_fn
|
|
|
|
from unraid_mcp.core.exceptions import ToolError
|
|
from unraid_mcp.core.utils import format_bytes, format_kb, safe_get
|
|
|
|
|
|
# --- Unit tests for helpers ---
|
|
|
|
|
|
class TestFormatBytes:
|
|
def test_none(self) -> None:
|
|
assert format_bytes(None) == "N/A"
|
|
|
|
def test_bytes(self) -> None:
|
|
assert format_bytes(512) == "512.00 B"
|
|
|
|
def test_kilobytes(self) -> None:
|
|
assert format_bytes(2048) == "2.00 KB"
|
|
|
|
def test_megabytes(self) -> None:
|
|
assert format_bytes(1048576) == "1.00 MB"
|
|
|
|
def test_gigabytes(self) -> None:
|
|
assert format_bytes(1073741824) == "1.00 GB"
|
|
|
|
def test_terabytes(self) -> None:
|
|
assert format_bytes(1099511627776) == "1.00 TB"
|
|
|
|
|
|
# --- Integration tests ---
|
|
|
|
|
|
@pytest.fixture
|
|
def _mock_graphql() -> Generator[AsyncMock, None, None]:
|
|
with patch("unraid_mcp.tools.storage.make_graphql_request", new_callable=AsyncMock) as mock:
|
|
yield mock
|
|
|
|
|
|
def _make_tool():
|
|
return make_tool_fn("unraid_mcp.tools.storage", "register_storage_tool", "unraid_storage")
|
|
|
|
|
|
class TestStorageValidation:
|
|
async def test_disk_details_requires_disk_id(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="disk_id"):
|
|
await tool_fn(action="disk_details")
|
|
|
|
async def test_logs_requires_log_path(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="log_path"):
|
|
await tool_fn(action="logs")
|
|
|
|
async def test_logs_rejects_invalid_path(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="log_path must start with"):
|
|
await tool_fn(action="logs", log_path="/etc/shadow")
|
|
|
|
async def test_logs_rejects_path_traversal(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
# Traversal that escapes /var/log/ to reach /etc/shadow
|
|
with pytest.raises(ToolError, match="log_path must start with"):
|
|
await tool_fn(action="logs", log_path="/var/log/../../etc/shadow")
|
|
# Traversal that escapes /mnt/ to reach /etc/passwd
|
|
with pytest.raises(ToolError, match="log_path must start with"):
|
|
await tool_fn(action="logs", log_path="/mnt/../etc/passwd")
|
|
|
|
async def test_logs_allows_valid_paths(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
|
assert result["content"] == "ok"
|
|
|
|
async def test_logs_tail_lines_too_large(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="tail_lines must be between"):
|
|
await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_001)
|
|
|
|
async def test_logs_tail_lines_zero_rejected(self, _mock_graphql: AsyncMock) -> None:
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="tail_lines must be between"):
|
|
await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=0)
|
|
|
|
async def test_logs_tail_lines_at_max_accepted(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"logFile": {"path": "/var/log/syslog", "content": "ok"}}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="logs", log_path="/var/log/syslog", tail_lines=10_000)
|
|
assert result["content"] == "ok"
|
|
|
|
async def test_non_logs_action_ignores_tail_lines_validation(
|
|
self, _mock_graphql: AsyncMock
|
|
) -> None:
|
|
_mock_graphql.return_value = {"shares": []}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="shares", tail_lines=0)
|
|
assert result["shares"] == []
|
|
|
|
|
|
class TestFormatKb:
|
|
def test_none_returns_na(self) -> None:
|
|
assert format_kb(None) == "N/A"
|
|
|
|
def test_invalid_string_returns_na(self) -> None:
|
|
assert format_kb("not-a-number") == "N/A"
|
|
|
|
def test_kilobytes_range(self) -> None:
|
|
assert format_kb(512) == "512.00 KB"
|
|
|
|
def test_megabytes_range(self) -> None:
|
|
assert format_kb(2048) == "2.00 MB"
|
|
|
|
def test_gigabytes_range(self) -> None:
|
|
assert format_kb(1_048_576) == "1.00 GB"
|
|
|
|
def test_terabytes_range(self) -> None:
|
|
assert format_kb(1_073_741_824) == "1.00 TB"
|
|
|
|
def test_boundary_exactly_1024_kb(self) -> None:
|
|
# 1024 KB = 1 MB
|
|
assert format_kb(1024) == "1.00 MB"
|
|
|
|
|
|
class TestSafeGet:
|
|
def test_simple_key_access(self) -> None:
|
|
assert safe_get({"a": 1}, "a") == 1
|
|
|
|
def test_nested_key_access(self) -> None:
|
|
assert safe_get({"a": {"b": "val"}}, "a", "b") == "val"
|
|
|
|
def test_missing_key_returns_none(self) -> None:
|
|
assert safe_get({"a": 1}, "missing") is None
|
|
|
|
def test_none_intermediate_returns_default(self) -> None:
|
|
assert safe_get({"a": None}, "a", "b") is None
|
|
|
|
def test_custom_default_returned(self) -> None:
|
|
assert safe_get({}, "x", default="fallback") == "fallback"
|
|
|
|
def test_non_dict_intermediate_returns_default(self) -> None:
|
|
assert safe_get({"a": "string"}, "a", "b") is None
|
|
|
|
def test_empty_list_default(self) -> None:
|
|
result = safe_get({}, "missing", default=[])
|
|
assert result == []
|
|
|
|
def test_zero_value_not_replaced_by_default(self) -> None:
|
|
assert safe_get({"temp": 0}, "temp", default="N/A") == 0
|
|
|
|
def test_false_value_not_replaced_by_default(self) -> None:
|
|
assert safe_get({"active": False}, "active", default=True) is False
|
|
|
|
def test_empty_string_not_replaced_by_default(self) -> None:
|
|
assert safe_get({"name": ""}, "name", default="unknown") == ""
|
|
|
|
|
|
class TestStorageActions:
|
|
async def test_shares(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {
|
|
"shares": [{"id": "s:1", "name": "media"}, {"id": "s:2", "name": "backups"}]
|
|
}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="shares")
|
|
assert len(result["shares"]) == 2
|
|
|
|
async def test_disks(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"disks": [{"id": "d:1", "device": "sda"}]}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="disks")
|
|
assert len(result["disks"]) == 1
|
|
|
|
async def test_disk_details(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {
|
|
"disk": {
|
|
"id": "d:1",
|
|
"device": "sda",
|
|
"name": "WD",
|
|
"serialNum": "SN1",
|
|
"size": 1073741824,
|
|
"temperature": 35,
|
|
}
|
|
}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="disk_details", disk_id="d:1")
|
|
assert result["summary"]["temperature"] == "35\u00b0C"
|
|
assert "1.00 GB" in result["summary"]["size_formatted"]
|
|
|
|
async def test_disk_details_temperature_zero(self, _mock_graphql: AsyncMock) -> None:
|
|
"""Temperature of 0 should display as '0\u00b0C', not 'N/A'."""
|
|
_mock_graphql.return_value = {
|
|
"disk": {
|
|
"id": "d:1",
|
|
"device": "sda",
|
|
"name": "WD",
|
|
"serialNum": "SN1",
|
|
"size": 1073741824,
|
|
"temperature": 0,
|
|
}
|
|
}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="disk_details", disk_id="d:1")
|
|
assert result["summary"]["temperature"] == "0\u00b0C"
|
|
|
|
async def test_disk_details_temperature_null(self, _mock_graphql: AsyncMock) -> None:
|
|
"""Null temperature should display as 'N/A'."""
|
|
_mock_graphql.return_value = {
|
|
"disk": {
|
|
"id": "d:1",
|
|
"device": "sda",
|
|
"name": "WD",
|
|
"serialNum": "SN1",
|
|
"size": 1073741824,
|
|
"temperature": None,
|
|
}
|
|
}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="disk_details", disk_id="d:1")
|
|
assert result["summary"]["temperature"] == "N/A"
|
|
|
|
async def test_logs_null_log_file(self, _mock_graphql: AsyncMock) -> None:
|
|
"""logFile being null should return an empty dict."""
|
|
_mock_graphql.return_value = {"logFile": None}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
|
assert result == {}
|
|
|
|
async def test_disk_details_not_found(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"disk": None}
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="not found"):
|
|
await tool_fn(action="disk_details", disk_id="d:missing")
|
|
|
|
async def test_unassigned(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"unassignedDevices": []}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="unassigned")
|
|
assert result["devices"] == []
|
|
|
|
async def test_log_files(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {"logFiles": [{"name": "syslog", "path": "/var/log/syslog"}]}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="log_files")
|
|
assert len(result["log_files"]) == 1
|
|
|
|
async def test_logs(self, _mock_graphql: AsyncMock) -> None:
|
|
_mock_graphql.return_value = {
|
|
"logFile": {"path": "/var/log/syslog", "content": "log line", "totalLines": 1}
|
|
}
|
|
tool_fn = _make_tool()
|
|
result = await tool_fn(action="logs", log_path="/var/log/syslog")
|
|
assert result["content"] == "log line"
|
|
|
|
|
|
class TestStorageNetworkErrors:
|
|
"""Tests for network-level failures in storage operations."""
|
|
|
|
async def test_logs_json_decode_error(self, _mock_graphql: AsyncMock) -> None:
|
|
"""Invalid JSON response when reading logs should propagate as ToolError."""
|
|
_mock_graphql.side_effect = ToolError(
|
|
"Invalid JSON response from Unraid API: Expecting value: line 1 column 1"
|
|
)
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="Invalid JSON"):
|
|
await tool_fn(action="logs", log_path="/var/log/syslog")
|
|
|
|
async def test_shares_connection_refused(self, _mock_graphql: AsyncMock) -> None:
|
|
"""Connection refused when listing shares should propagate as ToolError."""
|
|
_mock_graphql.side_effect = ToolError(
|
|
"Network connection error: [Errno 111] Connection refused"
|
|
)
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="Connection refused"):
|
|
await tool_fn(action="shares")
|
|
|
|
async def test_disks_http_500(self, _mock_graphql: AsyncMock) -> None:
|
|
"""HTTP 500 when listing disks should propagate as ToolError."""
|
|
_mock_graphql.side_effect = ToolError("HTTP error 500: Internal Server Error")
|
|
tool_fn = _make_tool()
|
|
with pytest.raises(ToolError, match="HTTP error 500"):
|
|
await tool_fn(action="disks")
|