Mirror of https://github.com/jmagar/unraid-mcp.git (synced 2026-03-02 00:04:45 -08:00)
fix: apply all PR review agent findings (silent failures, type safety, test gaps)
Addresses issues found by 4 parallel review agents (code-reviewer,
silent-failure-hunter, type-design-analyzer, pr-test-analyzer).
Source fixes:
- core/utils.py: add public safe_display_url() (moved from tools/health.py; see the sketch after this list)
- core/client.py: rename _redact_sensitive → redact_sensitive (public API)
- core/types.py: add SubscriptionData.__post_init__ for tz-aware datetime
  enforcement (sketched below); remove 6 unused type aliases (SystemHealth, APIResponse, etc.)
- subscriptions/manager.py: add exc_info=True to both except-Exception blocks;
add except ValueError break-on-config-error before retry loop; import
redact_sensitive by new public name
- subscriptions/resources.py: re-raise in autostart_subscriptions() so
ensure_subscriptions_started() doesn't permanently set _subscriptions_started
- subscriptions/diagnostics.py: except ToolError: raise before broad except;
use safe_display_url() instead of raw URL slice
- tools/health.py: move _safe_display_url to core/utils; add exc_info=True;
raise ToolError (not return dict) on ImportError
- tools/info.py: use get_args(INFO_ACTIONS) instead of INFO_ACTIONS.__args__
- tools/{array,docker,keys,notifications,rclone,storage,virtualization}.py:
  add Literal-vs-ALL_ACTIONS sync check at import time (sketched below)
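For reference, a minimal sketch of the relocated helper, inferred from the TestSafeDisplayUrl expectations further down this diff; the actual core/utils.py implementation may differ in details:

```python
from urllib.parse import urlparse


def safe_display_url(url: str | None) -> str | None:
    """Reduce a URL to scheme://host[:port]; drop credentials, path, and query."""
    if not url:
        return None
    try:
        parsed = urlparse(url)
        host = parsed.hostname  # excludes user:password; raises ValueError on bad IPv6
        if parsed.port is not None:
            return f"{parsed.scheme}://{host}:{parsed.port}"
        return f"{parsed.scheme}://{host}"
    except ValueError:
        return "<unparseable>"
```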
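A sketch of the tz-aware enforcement pattern named above. The diff does not show core/types.py, so the field names here are invented placeholders; only the __post_init__ check is the point:

```python
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any


@dataclass
class SubscriptionData:
    payload: dict[str, Any]  # hypothetical field name
    received_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def __post_init__(self) -> None:
        # Reject naive datetimes so timestamps stay comparable across zones.
        if self.received_at.tzinfo is None:
            raise ValueError("received_at must be timezone-aware")
```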
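A sketch of the import-time Literal-vs-ALL_ACTIONS guard; the alias name and action values are hypothetical stand-ins, not the real modules' contents:

```python
from typing import Literal, get_args

DOCKER_ACTIONS = Literal["list", "logs", "start", "stop"]  # hypothetical values
ALL_ACTIONS: tuple[str, ...] = ("list", "logs", "start", "stop")

# Runs once at import time: fails fast if the Literal and runtime tuple drift.
if set(get_args(DOCKER_ACTIONS)) != set(ALL_ACTIONS):
    raise RuntimeError("DOCKER_ACTIONS Literal and ALL_ACTIONS are out of sync")
```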
Test fixes:
- test_health.py: import safe_display_url from core.utils; update
test_diagnose_import_error_internal to expect ToolError (not error dict)
- test_storage.py: add 3 safe_get tests for zero/False/empty-string values (see sketch below)
- test_subscription_manager.py: add TestCapLogContentSingleMassiveLine (2 tests)
- test_client.py: rename _redact_sensitive → redact_sensitive; add tests for
new sensitive keys and is_cacheable explicit-keyword form
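The three new safe_get tests pin down sentinel-based lookup semantics: falsy values (0, False, "") are returned, and the default applies only when the key is absent. A sketch consistent with those tests; the real signature and location may differ:

```python
from typing import Any

_MISSING = object()  # sentinel: distinguishes "absent" from falsy values


def safe_get(data: Any, key: str, default: Any = None) -> Any:
    """Return data[key] even when falsy; fall back to default only if absent."""
    if isinstance(data, dict):
        value = data.get(key, _MISSING)
        return default if value is _MISSING else value
    return default


assert safe_get({"temp": 0}, "temp", default="N/A") == 0
assert safe_get({"active": False}, "active", default=True) is False
assert safe_get({"name": ""}, "name", default="unknown") == ""
```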
@@ -16,6 +16,7 @@ import websockets.exceptions

+from unraid_mcp.subscriptions.manager import SubscriptionManager

 pytestmark = pytest.mark.integration
test_client.py
@@ -12,9 +12,9 @@ from unraid_mcp.core.client import (
     DISK_TIMEOUT,
     _QueryCache,
     _RateLimiter,
-    _redact_sensitive,
     is_idempotent_error,
     make_graphql_request,
+    redact_sensitive,
 )
 from unraid_mcp.core.exceptions import ToolError
@@ -60,7 +60,7 @@ class TestIsIdempotentError:


 # ---------------------------------------------------------------------------
-# _redact_sensitive
+# redact_sensitive
 # ---------------------------------------------------------------------------
@@ -69,36 +69,36 @@ class TestRedactSensitive:

     def test_flat_dict(self) -> None:
         data = {"username": "admin", "password": "hunter2", "host": "10.0.0.1"}
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result["username"] == "admin"
         assert result["password"] == "***"
         assert result["host"] == "10.0.0.1"

     def test_nested_dict(self) -> None:
         data = {"config": {"apiKey": "abc123", "url": "http://host"}}
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result["config"]["apiKey"] == "***"
         assert result["config"]["url"] == "http://host"

     def test_list_of_dicts(self) -> None:
         data = [{"token": "t1"}, {"name": "safe"}]
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result[0]["token"] == "***"
         assert result[1]["name"] == "safe"

     def test_deeply_nested(self) -> None:
         data = {"a": {"b": {"c": {"secret": "deep"}}}}
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result["a"]["b"]["c"]["secret"] == "***"

     def test_non_dict_passthrough(self) -> None:
-        assert _redact_sensitive("plain_string") == "plain_string"
-        assert _redact_sensitive(42) == 42
-        assert _redact_sensitive(None) is None
+        assert redact_sensitive("plain_string") == "plain_string"
+        assert redact_sensitive(42) == 42
+        assert redact_sensitive(None) is None

     def test_case_insensitive_keys(self) -> None:
         data = {"Password": "p1", "TOKEN": "t1", "ApiKey": "k1", "Secret": "s1", "Key": "x1"}
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         for v in result.values():
             assert v == "***"
@@ -112,7 +112,7 @@ class TestRedactSensitive:
             "username": "safe",
             "host": "safe",
         }
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result["user_password"] == "***"
         assert result["api_key_value"] == "***"
         assert result["auth_token_expiry"] == "***"
@@ -122,12 +122,26 @@ class TestRedactSensitive:

     def test_mixed_list_content(self) -> None:
         data = [{"key": "val"}, "string", 123, [{"token": "inner"}]]
-        result = _redact_sensitive(data)
+        result = redact_sensitive(data)
         assert result[0]["key"] == "***"
         assert result[1] == "string"
         assert result[2] == 123
         assert result[3][0]["token"] == "***"

+    def test_new_sensitive_keys_are_redacted(self) -> None:
+        """PR-added keys: authorization, cookie, session, credential, passphrase, jwt."""
+        data = {
+            "authorization": "Bearer token123",
+            "cookie": "session=abc",
+            "jwt": "eyJ...",
+            "credential": "secret_cred",
+            "passphrase": "hunter2",
+            "session": "sess_id",
+        }
+        result = redact_sensitive(data)
+        for key, val in result.items():
+            assert val == "***", f"Key '{key}' was not redacted"
+

 # ---------------------------------------------------------------------------
 # Timeout constants
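The tests above fully pin down the redaction contract: recursive descent through dicts and lists, case-insensitive substring matching on key names, and passthrough for scalars. A minimal sketch consistent with them; the exact key list in core/client.py may differ:

```python
from typing import Any

_SENSITIVE_SUBSTRINGS = (
    "password", "token", "secret", "key",
    "authorization", "cookie", "session", "credential", "passphrase", "jwt",
)


def redact_sensitive(value: Any) -> Any:
    """Recursively mask values whose key contains a sensitive substring."""
    if isinstance(value, dict):
        return {
            k: "***"
            if any(s in k.lower() for s in _SENSITIVE_SUBSTRINGS)
            else redact_sensitive(v)
            for k, v in value.items()
        }
    if isinstance(value, list):
        return [redact_sensitive(item) for item in value]
    return value  # scalars (str, int, None, ...) pass through unchanged
```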
@@ -347,7 +361,7 @@ class TestMakeGraphQLRequestErrors:

         with (
             patch("unraid_mcp.core.client.get_http_client", return_value=mock_client),
-            pytest.raises(ToolError, match="invalid response.*not valid JSON"),
+            pytest.raises(ToolError, match=r"invalid response.*not valid JSON"),
         ):
             await make_graphql_request("{ info }")
@@ -481,7 +495,7 @@ class TestRateLimiter:
         limiter = _RateLimiter(max_tokens=10, refill_rate=1.0)
         initial = limiter.tokens
         await limiter.acquire()
-        assert limiter.tokens == initial - 1
+        assert limiter.tokens == pytest.approx(initial - 1, abs=1e-3)

     async def test_acquire_succeeds_when_tokens_available(self) -> None:
         limiter = _RateLimiter(max_tokens=5, refill_rate=1.0)
@@ -596,6 +610,15 @@ class TestQueryCache:
         """Queries that start with 'mutation' after whitespace are not cacheable."""
         assert _QueryCache.is_cacheable(" mutation { ... }") is False

+    def test_is_cacheable_with_explicit_query_keyword(self) -> None:
+        """Operation names after explicit 'query' keyword must be recognized."""
+        assert _QueryCache.is_cacheable("query GetNetworkConfig { network { name } }") is True
+        assert _QueryCache.is_cacheable("query GetOwner { owner { name } }") is True
+
+    def test_is_cacheable_anonymous_query_returns_false(self) -> None:
+        """Anonymous 'query { ... }' has no operation name — must not be cached."""
+        assert _QueryCache.is_cacheable("query { network { name } }") is False
+
     def test_expired_entry_removed_from_store(self) -> None:
         """Accessing an expired entry should remove it from the internal store."""
         cache = _QueryCache()
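A sketch of a heuristic consistent with the three new is_cacheable cases: mutations are never cacheable, named queries are, anonymous queries are not. Shown as a free function; the real _QueryCache.is_cacheable may use a different check:

```python
import re


def is_cacheable(query: str) -> bool:
    """Cache only explicitly named queries; never cache mutations."""
    stripped = query.lstrip()
    if stripped.startswith("mutation"):
        return False
    # Require an operation name: "query SomeName { ... }".
    return re.match(r"query\s+\w+\s*\{", stripped) is not None
```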
test_docker.py
@@ -80,6 +80,14 @@ class TestDockerValidation:
         with pytest.raises(ToolError, match="network_id"):
             await tool_fn(action="network_details")

+    async def test_non_logs_action_ignores_tail_lines_validation(
+        self, _mock_graphql: AsyncMock
+    ) -> None:
+        _mock_graphql.return_value = {"docker": {"containers": []}}
+        tool_fn = _make_tool()
+        result = await tool_fn(action="list", tail_lines=0)
+        assert result["containers"] == []
+

 class TestDockerActions:
     async def test_list(self, _mock_graphql: AsyncMock) -> None:
@@ -224,9 +232,22 @@ class TestDockerActions:
     async def test_generic_exception_wraps_in_tool_error(self, _mock_graphql: AsyncMock) -> None:
         _mock_graphql.side_effect = RuntimeError("unexpected failure")
         tool_fn = _make_tool()
-        with pytest.raises(ToolError, match="unexpected failure"):
+        with pytest.raises(ToolError, match="Failed to execute docker/list"):
             await tool_fn(action="list")

+    async def test_short_id_prefix_ambiguous_rejected(self, _mock_graphql: AsyncMock) -> None:
+        _mock_graphql.return_value = {
+            "docker": {
+                "containers": [
+                    {"id": "abcdef1234560000000000000000000000000000000000000000000000000000:local", "names": ["plex"]},
+                    {"id": "abcdef1234561111111111111111111111111111111111111111111111111111:local", "names": ["sonarr"]},
+                ]
+            }
+        }
+        tool_fn = _make_tool()
+        with pytest.raises(ToolError, match="ambiguous"):
+            await tool_fn(action="logs", container_id="abcdef123456")
+

 class TestDockerMutationFailures:
     """Tests for mutation responses that indicate failure or unexpected shapes."""
test_health.py
@@ -7,7 +7,7 @@ import pytest
 from conftest import make_tool_fn

 from unraid_mcp.core.exceptions import ToolError
-from unraid_mcp.tools.health import _safe_display_url
+from unraid_mcp.core.utils import safe_display_url


 @pytest.fixture
@@ -100,7 +100,7 @@ class TestHealthActions:
                 "unraid_mcp.tools.health._diagnose_subscriptions",
                 side_effect=RuntimeError("broken"),
             ),
-            pytest.raises(ToolError, match="broken"),
+            pytest.raises(ToolError, match="Failed to execute health/diagnose"),
         ):
             await tool_fn(action="diagnose")
@@ -115,7 +115,7 @@ class TestHealthActions:
         assert "cpu_sub" in result

     async def test_diagnose_import_error_internal(self) -> None:
-        """_diagnose_subscriptions catches ImportError and returns error dict."""
+        """_diagnose_subscriptions raises ToolError when subscription modules are unavailable."""
         import sys

         from unraid_mcp.tools.health import _diagnose_subscriptions
@@ -127,16 +127,18 @@ class TestHealthActions:

         try:
             # Replace the modules with objects that raise ImportError on access
-            with patch.dict(
-                sys.modules,
-                {
-                    "unraid_mcp.subscriptions": None,
-                    "unraid_mcp.subscriptions.manager": None,
-                    "unraid_mcp.subscriptions.resources": None,
-                },
+            with (
+                patch.dict(
+                    sys.modules,
+                    {
+                        "unraid_mcp.subscriptions": None,
+                        "unraid_mcp.subscriptions.manager": None,
+                        "unraid_mcp.subscriptions.resources": None,
+                    },
+                ),
+                pytest.raises(ToolError, match="Subscription modules not available"),
             ):
-                result = await _diagnose_subscriptions()
-                assert "error" in result
+                await _diagnose_subscriptions()
         finally:
             # Restore cached modules
             sys.modules.update(cached)
@@ -148,47 +150,47 @@ class TestHealthActions:


 class TestSafeDisplayUrl:
-    """Verify that _safe_display_url strips credentials/path and preserves scheme+host+port."""
+    """Verify that safe_display_url strips credentials/path and preserves scheme+host+port."""

     def test_none_returns_none(self) -> None:
-        assert _safe_display_url(None) is None
+        assert safe_display_url(None) is None

     def test_empty_string_returns_none(self) -> None:
-        assert _safe_display_url("") is None
+        assert safe_display_url("") is None

     def test_simple_url_scheme_and_host(self) -> None:
-        assert _safe_display_url("https://unraid.local/graphql") == "https://unraid.local"
+        assert safe_display_url("https://unraid.local/graphql") == "https://unraid.local"

     def test_preserves_port(self) -> None:
-        assert _safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337"
+        assert safe_display_url("https://10.1.0.2:31337/api/graphql") == "https://10.1.0.2:31337"

     def test_strips_path(self) -> None:
-        result = _safe_display_url("http://unraid.local/some/deep/path?query=1")
+        result = safe_display_url("http://unraid.local/some/deep/path?query=1")
         assert "path" not in result
         assert "query" not in result

     def test_strips_credentials(self) -> None:
-        result = _safe_display_url("https://user:password@unraid.local/graphql")
+        result = safe_display_url("https://user:password@unraid.local/graphql")
         assert "user" not in result
         assert "password" not in result
         assert result == "https://unraid.local"

     def test_strips_query_params(self) -> None:
-        result = _safe_display_url("http://host.local?token=abc&key=xyz")
+        result = safe_display_url("http://host.local?token=abc&key=xyz")
         assert "token" not in result
         assert "abc" not in result

     def test_http_scheme_preserved(self) -> None:
-        result = _safe_display_url("http://10.0.0.1:8080/api")
+        result = safe_display_url("http://10.0.0.1:8080/api")
         assert result == "http://10.0.0.1:8080"

     def test_tailscale_url(self) -> None:
-        result = _safe_display_url("https://100.118.209.1:31337/graphql")
+        result = safe_display_url("https://100.118.209.1:31337/graphql")
         assert result == "https://100.118.209.1:31337"

     def test_malformed_ipv6_url_returns_unparseable(self) -> None:
         """Malformed IPv6 brackets in netloc cause urlparse.hostname to raise ValueError."""
         # urlparse("https://[invalid") parses without error, but accessing .hostname
         # raises ValueError: Invalid IPv6 URL — this triggers the except branch.
-        result = _safe_display_url("https://[invalid")
+        result = safe_display_url("https://[invalid")
         assert result == "<unparseable>"
test_info.py
@@ -186,7 +186,7 @@ class TestUnraidInfoTool:
     async def test_generic_exception_wraps(self, _mock_graphql: AsyncMock) -> None:
         _mock_graphql.side_effect = RuntimeError("unexpected")
         tool_fn = _make_tool()
-        with pytest.raises(ToolError, match="unexpected"):
+        with pytest.raises(ToolError, match="Failed to execute info/online"):
             await tool_fn(action="online")

     async def test_metrics(self, _mock_graphql: AsyncMock) -> None:
@@ -201,6 +201,7 @@ class TestUnraidInfoTool:
         _mock_graphql.return_value = {"services": [{"name": "docker", "state": "running"}]}
         tool_fn = _make_tool()
         result = await tool_fn(action="services")
         assert "services" in result
         assert len(result["services"]) == 1
+        assert result["services"][0]["name"] == "docker"
@@ -225,6 +226,7 @@ class TestUnraidInfoTool:
         }
         tool_fn = _make_tool()
         result = await tool_fn(action="servers")
         assert "servers" in result
         assert len(result["servers"]) == 1
+        assert result["servers"][0]["name"] == "tower"
@@ -248,6 +250,7 @@ class TestUnraidInfoTool:
         }
         tool_fn = _make_tool()
         result = await tool_fn(action="ups_devices")
         assert "ups_devices" in result
         assert len(result["ups_devices"]) == 1
+        assert result["ups_devices"][0]["model"] == "APC"
test_rclone.py
@@ -19,7 +19,6 @@ def _make_tool():
     return make_tool_fn("unraid_mcp.tools.rclone", "register_rclone_tool", "unraid_rclone")


-@pytest.mark.usefixtures("_mock_graphql")
 class TestRcloneValidation:
     async def test_delete_requires_confirm(self) -> None:
         tool_fn = _make_tool()
test_storage.py
@@ -149,6 +149,15 @@ class TestSafeGet:
         result = safe_get({}, "missing", default=[])
         assert result == []

+    def test_zero_value_not_replaced_by_default(self) -> None:
+        assert safe_get({"temp": 0}, "temp", default="N/A") == 0
+
+    def test_false_value_not_replaced_by_default(self) -> None:
+        assert safe_get({"active": False}, "active", default=True) is False
+
+    def test_empty_string_not_replaced_by_default(self) -> None:
+        assert safe_get({"name": ""}, "name", default="unknown") == ""
+

 class TestStorageActions:
     async def test_shares(self, _mock_graphql: AsyncMock) -> None:
test_subscription_manager.py
@@ -60,8 +60,8 @@ class TestCapLogContentSmallData:
 class TestCapLogContentTruncation:
     """Content exceeding both byte AND line limits must be truncated to the last N lines."""

-    def test_oversized_content_truncated_to_last_n_lines(self) -> None:
-        # 200 lines, limit 50 lines, byte limit effectively 0 → should keep last 50 lines
+    def test_oversized_content_truncated_and_byte_capped(self) -> None:
+        # 200 lines, tiny byte limit: must keep recent content within byte cap.
         lines = [f"line {i}" for i in range(200)]
         data = {"content": "\n".join(lines)}
         with (
@@ -70,14 +70,13 @@ class TestCapLogContentTruncation:
         ):
             result = _cap_log_content(data)
         result_lines = result["content"].splitlines()
-        assert len(result_lines) == 50
-        # Must be the LAST 50 lines
-        assert result_lines[0] == "line 150"
+        assert len(result["content"].encode("utf-8", errors="replace")) <= 10
+        # Must keep the most recent line suffix.
         assert result_lines[-1] == "line 199"

-    def test_content_with_fewer_lines_than_limit_not_truncated(self) -> None:
-        """If byte limit exceeded but line count ≤ limit → keep original (not truncated)."""
-        # 30 lines but byte limit 10 and line limit 50 → 30 < 50 so no truncation
+    def test_content_with_fewer_lines_than_limit_still_honors_byte_cap(self) -> None:
+        """If byte limit is exceeded, output must still be capped even with few lines."""
+        # 30 lines, byte limit 10, line limit 50 -> must cap bytes regardless of line count
         lines = [f"line {i}" for i in range(30)]
         data = {"content": "\n".join(lines)}
         with (
@@ -85,8 +84,7 @@ class TestCapLogContentTruncation:
             patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
         ):
             result = _cap_log_content(data)
-        # Original content preserved
-        assert result["content"] == data["content"]
+        assert len(result["content"].encode("utf-8", errors="replace")) <= 10

     def test_non_content_keys_preserved_alongside_truncated_content(self) -> None:
         lines = [f"line {i}" for i in range(200)]
@@ -98,7 +96,7 @@ class TestCapLogContentTruncation:
             result = _cap_log_content(data)
         assert result["path"] == "/var/log/syslog"
         assert result["total_lines"] == 200
-        assert len(result["content"].splitlines()) == 50
+        assert len(result["content"].encode("utf-8", errors="replace")) <= 10


 class TestCapLogContentNested:
@@ -112,7 +110,7 @@ class TestCapLogContentNested:
             patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
         ):
             result = _cap_log_content(data)
-        assert len(result["logFile"]["content"].splitlines()) == 50
+        assert len(result["logFile"]["content"].encode("utf-8", errors="replace")) <= 10
         assert result["logFile"]["path"] == "/var/log/syslog"

     def test_deeply_nested_content_capped(self) -> None:
@@ -123,9 +121,36 @@ class TestCapLogContentNested:
             patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 50),
         ):
             result = _cap_log_content(data)
-        assert len(result["outer"]["inner"]["content"].splitlines()) == 50
+        assert len(result["outer"]["inner"]["content"].encode("utf-8", errors="replace")) <= 10

     def test_nested_non_content_keys_unaffected(self) -> None:
         data = {"metrics": {"cpu": 42.5, "memory": 8192}}
         result = _cap_log_content(data)
         assert result == data
+
+
+class TestCapLogContentSingleMassiveLine:
+    """A single line larger than the byte cap must be hard-capped at byte level."""
+
+    def test_single_massive_line_hard_caps_bytes(self) -> None:
+        # One line, no newlines, larger than the byte cap.
+        # The while-loop can't reduce it (len(lines) == 1), so the
+        # last-resort byte-slice path at manager.py:65-69 must fire.
+        huge_content = "x" * 200
+        data = {"content": huge_content}
+        with (
+            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
+            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000),
+        ):
+            result = _cap_log_content(data)
+        assert len(result["content"].encode("utf-8", errors="replace")) <= 10
+
+    def test_single_massive_line_input_not_mutated(self) -> None:
+        huge_content = "x" * 200
+        data = {"content": huge_content}
+        with (
+            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_BYTES", 10),
+            patch("unraid_mcp.subscriptions.manager._MAX_RESOURCE_DATA_LINES", 5_000),
+        ):
+            _cap_log_content(data)
+        assert data["content"] == huge_content
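For context, a sketch of a _cap_log_content consistent with all the tests above: drop the oldest lines while over the byte cap, recurse into nested dicts, never mutate the input, and hard-slice a single line that cannot be reduced further. The real manager.py implementation (the "byte-slice path at manager.py:65-69" the comment cites) may differ; the limits here are placeholders:

```python
from typing import Any

_MAX_RESOURCE_DATA_BYTES = 10_000  # placeholder limits
_MAX_RESOURCE_DATA_LINES = 5_000


def _cap_log_content(data: dict[str, Any]) -> dict[str, Any]:
    """Return a copy with every nested 'content' string capped by lines and bytes."""
    capped: dict[str, Any] = {}
    for key, value in data.items():
        if isinstance(value, dict):
            capped[key] = _cap_log_content(value)
        elif key == "content" and isinstance(value, str):
            lines = value.splitlines()
            # Drop the oldest lines while over either limit (keep the newest).
            while len(lines) > 1 and (
                len(lines) > _MAX_RESOURCE_DATA_LINES
                or len("\n".join(lines).encode("utf-8", errors="replace"))
                > _MAX_RESOURCE_DATA_BYTES
            ):
                lines = lines[1:]
            text = "\n".join(lines)
            # Last resort: a single oversized line is hard-sliced at byte level.
            raw = text.encode("utf-8", errors="replace")
            if len(raw) > _MAX_RESOURCE_DATA_BYTES:
                # errors="ignore" discards a partial multibyte char at the cut.
                text = raw[-_MAX_RESOURCE_DATA_BYTES:].decode("utf-8", errors="ignore")
            capped[key] = text
        else:
            capped[key] = value
    return capped
```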