fix: address 14 PR review comments from coderabbitai/chatgpt-codex

- guards.py: split confirm bypass into explicit check; use .get() for
  dict description to prevent KeyError on missing action keys
- resources.py: use `is not None` for logs stream cache check; add
  on-demand subscribe_once fallback when auto_start is disabled so
  resources return real data instead of a perpetual "connecting" placeholder
- setup.py: always prompt before overwriting credentials even on failed
  probe (transient outage ≠ bad credentials); update elicitation message
- unraid.py: always elicit_reset_confirmation before overwriting creds;
  use asyncio.to_thread() for os.path.realpath() to avoid blocking the async event loop
- test_health.py: update test for new always-prompt-on-overwrite behavior;
  add test for declined-reset on failed probe
- test_resources.py: add tests for logs-stream None check, auto_start
  disabled fallback (success and failure), and fallback error recovery
- test-tools.sh: add suite_live() covering cpu/memory/cpu_telemetry/
  notifications_overview/log_tail; include in sequential and parallel runners
- CLAUDE.md: correct unraid_live → live action reference; document that
  setup always prompts before overwriting; note subscribe_once fallback
This commit is contained in:
Jacob Magar
2026-03-16 03:10:01 -04:00
parent efaab031ae
commit 884319ab11
8 changed files with 149 additions and 17 deletions

View File

@@ -90,10 +90,19 @@ async def gate_destructive_action(
Pass a str when one description covers all destructive actions.
Pass a dict[action_name, description] when descriptions differ.
"""
if action not in destructive_actions or confirm:
if action not in destructive_actions:
return
desc = description[action] if isinstance(description, dict) else description
if confirm:
logger.info("Destructive action '%s' bypassed via confirm=True.", action)
return
if isinstance(description, dict):
desc = description.get(action)
if desc is None:
raise ToolError(f"Missing destructive-action description for '{action}'.")
else:
desc = description
confirmed = await elicit_destructive_confirmation(ctx, action, desc)
if not confirmed:
raise ToolError(

View File

@@ -30,11 +30,11 @@ class _UnraidCredentials:
async def elicit_reset_confirmation(ctx: Context | None, current_url: str) -> bool:
"""Ask the user whether to overwrite already-working credentials.
"""Ask the user whether to overwrite existing credentials.
Args:
ctx: The MCP context for elicitation. If None, returns False immediately.
current_url: The currently configured URL (displayed for context).
current_url: The currently configured URL and status (displayed for context).
Returns:
True if the user confirmed the reset, False otherwise.
@@ -45,7 +45,7 @@ async def elicit_reset_confirmation(ctx: Context | None, current_url: str) -> bo
try:
result = await ctx.elicit(
message=(
"Credentials are already configured and working.\n\n"
"Credentials are already configured.\n\n"
f"**Current URL:** `{current_url}`\n\n"
"Do you want to reset your API URL and key?"
),

View File

@@ -15,6 +15,7 @@ from fastmcp import FastMCP
from ..config.logging import logger
from .manager import subscription_manager
from .queries import SNAPSHOT_ACTIONS
from .snapshot import subscribe_once
# Global flag to track subscription startup
@@ -94,7 +95,7 @@ def register_subscription_resources(mcp: FastMCP) -> None:
"""Real-time log stream data from subscription."""
await ensure_subscriptions_started()
data = await subscription_manager.get_resource_data("logFileSubscription")
if data:
if data is not None:
return json.dumps(data, indent=2)
return json.dumps(
{
@@ -118,6 +119,16 @@ def register_subscription_resources(mcp: FastMCP) -> None:
"message": f"Subscription '{action}' failed: {last_error}",
}
)
# When auto-start is disabled, fall back to a one-shot fetch so the
# resource returns real data instead of a perpetual "connecting" placeholder.
if not subscription_manager.auto_start_enabled:
try:
query_info = SNAPSHOT_ACTIONS.get(action)
if query_info is not None:
fallback_data = await subscribe_once(query_info)
return json.dumps(fallback_data, indent=2)
except Exception as e:
logger.warning("[RESOURCE] On-demand fallback for '%s' failed: %s", action, e)
return json.dumps(
{
"status": "connecting",

View File

@@ -21,6 +21,7 @@ Actions:
live - Real-time WebSocket subscription snapshots (11 subactions)
"""
import asyncio
import datetime
import os
import re
@@ -312,10 +313,20 @@ async def _handle_health(subaction: str, ctx: Context | None) -> dict[str, Any]
connection_ok = True
except Exception:
connection_ok = False
if connection_ok:
reset = await elicit_reset_confirmation(ctx, safe_display_url(UNRAID_API_URL) or "")
if not reset:
return f"✅ Credentials already configured and working.\nURL: `{safe_display_url(UNRAID_API_URL)}`\n\nNo changes made."
status_note = (
"and working"
if connection_ok
else "but the connection test failed — may be a transient outage"
)
reset = await elicit_reset_confirmation(
ctx,
f"{safe_display_url(UNRAID_API_URL) or ''} ({status_note})",
)
if not reset:
return (
f"✅ Credentials already configured ({status_note}).\n"
f"URL: `{safe_display_url(UNRAID_API_URL)}`\n\nNo changes made."
)
configured = await elicit_and_configure(ctx)
if configured:
return "✅ Credentials configured successfully. You can now use all Unraid MCP tools."
@@ -641,7 +652,7 @@ async def _handle_disk(
raise ToolError(f"tail_lines must be between 1 and {_MAX_TAIL_LINES}, got {tail_lines}")
if not log_path:
raise ToolError("log_path is required for disk/logs")
normalized = os.path.realpath(log_path) # noqa: ASYNC240
normalized = await asyncio.to_thread(os.path.realpath, log_path)
if not any(normalized.startswith(p) for p in _ALLOWED_LOG_PREFIXES):
raise ToolError(f"log_path must start with one of: {', '.join(_ALLOWED_LOG_PREFIXES)}")
log_path = normalized