fix: address 54 MEDIUM/LOW priority PR review issues

Comprehensive fixes across Python code, shell scripts, and documentation
addressing all remaining MEDIUM and LOW priority review comments.

Python Code Fixes (27 fixes):
- tools/info.py: Simplified dispatch with lookup tables, defensive guards,
  CPU fallback formatting, !s conversion flags, module-level sync assertion
- tools/docker.py: Case-insensitive container ID regex, keyword-only confirm,
  module-level ALL_ACTIONS constant
- tools/virtualization.py: Normalized single-VM dict responses, unified
  list/details queries
- core/client.py: Fixed HTTP client singleton race condition, compound key
  substring matching for sensitive data redaction
- subscriptions/: Extracted SSL context creation to shared helper in utils.py,
  replaced deprecated ssl._create_unverified_context API
- tools/array.py: Renamed parity_history to parity_status, hoisted ALL_ACTIONS
- tools/storage.py: Fixed dict(None) risks, temperature 0 falsiness bug
- tools/notifications.py, keys.py, rclone.py: Fixed dict(None) TypeError risks
- tests/: Fixed generator type annotations, added coverage for compound keys

Shell Script Fixes (13 fixes):
- dashboard.sh: Dynamic server discovery, conditional debug output, null-safe
  jq, notification count guard order, removed unused variables
- unraid-query.sh: Proper JSON escaping via jq, --ignore-errors and --insecure
  CLI flags, TLS verification now on by default
- validate-marketplace.sh: Removed unused YELLOW variable, defensive jq,
  simplified repository URL output

Documentation Fixes (24+ fixes):
- Version consistency: Updated all references to v0.2.0 across pyproject.toml,
  plugin.json, marketplace.json, MARKETPLACE.md, __init__.py, README files
- Tool count updates: Changed all "26 tools" references to "10 tools, 90 actions"
- Markdown lint: Fixed MD022, MD031, MD047 issues across multiple files
- Research docs: Fixed auth headers, removed web artifacts, corrected stale info
- Skills docs: Fixed query examples, endpoint counts, env var references

All 227 tests pass, ruff and ty checks clean.
This commit is contained in:
Jacob Magar
2026-02-15 17:09:31 -05:00
parent 6bbe46879e
commit 37e9424a5c
58 changed files with 1333 additions and 1175 deletions

View File

@@ -20,14 +20,21 @@ from ..config.settings import (
)
from ..core.exceptions import ToolError
# Sensitive keys to redact from debug logs
_SENSITIVE_KEYS = {"password", "key", "secret", "token", "apikey"}
def _is_sensitive_key(key: str) -> bool:
"""Check if a key name contains any sensitive substring."""
key_lower = key.lower()
return any(s in key_lower for s in _SENSITIVE_KEYS)
def _redact_sensitive(obj: Any) -> Any:
    """Recursively redact sensitive values from nested dicts/lists.

    Args:
        obj: Any JSON-like structure (dict, list, or scalar).

    Returns:
        A copy of *obj* where every dict value whose key matches
        ``_is_sensitive_key`` is replaced by ``"***"``; non-container
        values are returned unchanged.
    """
    if isinstance(obj, dict):
        # Substring matching via _is_sensitive_key catches compound keys
        # such as "api_key" or "auth_token", not just exact names.
        return {k: ("***" if _is_sensitive_key(k) else _redact_sensitive(v)) for k, v in obj.items()}
    if isinstance(obj, list):
        return [_redact_sensitive(item) for item in obj]
    return obj
@@ -35,7 +42,25 @@ def _redact_sensitive(obj: Any) -> Any:
# HTTP timeout configuration
DEFAULT_TIMEOUT = httpx.Timeout(10.0, read=30.0, connect=5.0)
DISK_TIMEOUT = httpx.Timeout(10.0, read=TIMEOUT_CONFIG["disk_operations"], connect=5.0)

# Named timeout profiles
_TIMEOUT_PROFILES: dict[str, httpx.Timeout] = {
    "default": DEFAULT_TIMEOUT,
    "disk_operations": DISK_TIMEOUT,
}


def get_timeout_for_operation(profile: str = "default") -> httpx.Timeout:
    """Get a timeout configuration by profile name.

    Args:
        profile: Timeout profile name (e.g., "default", "disk_operations").
            Defaults to "default" for backward compatibility with callers
            of the previous zero-argument form.

    Returns:
        The matching httpx.Timeout, falling back to DEFAULT_TIMEOUT for
        unknown profiles.
    """
    return _TIMEOUT_PROFILES.get(profile, DEFAULT_TIMEOUT)
# Global connection pool (module-level singleton)
_http_client: httpx.AsyncClient | None = None
def is_idempotent_error(error_message: str, operation: str) -> bool:
    """Check whether a GraphQL error indicates an already-satisfied operation.

    Docker start/stop requests that hit an already-running or
    already-stopped container are treated as successes rather than
    failures, so callers can safely retry.

    Args:
        error_message: Error text returned by the Unraid API.
        operation: Operation name; only "start" and "stop" are recognized.

    Returns:
        True if the error matches a known idempotent pattern for the
        given operation, False otherwise.
    """
    error_lower = error_message.lower()

    # Docker container operation patterns
    if operation == "start":
        return (
            "already started" in error_lower
            or "container already running" in error_lower
            or "http code 304" in error_lower
        )
    if operation == "stop":
        return (
            "already stopped" in error_lower
            or "container already stopped" in error_lower
            or "container not running" in error_lower
            or "http code 304" in error_lower
        )
    return False
async def _create_http_client() -> httpx.AsyncClient:
    """Create a new HTTP client instance with connection pooling.

    Returns:
        A new AsyncClient configured for Unraid API communication
    """
    # Connection pool settings
    pool_limits = httpx.Limits(
        max_keepalive_connections=20,
        max_connections=100,
        keepalive_expiry=30.0,
    )
    # Connection pooling headers
    default_headers = {
        "Connection": "keep-alive",
        "User-Agent": f"UnraidMCPServer/{VERSION}"
    }
    return httpx.AsyncClient(
        limits=pool_limits,
        # Default timeout (can be overridden per-request)
        timeout=DEFAULT_TIMEOUT,
        # SSL verification
        verify=UNRAID_VERIFY_SSL,
        headers=default_headers,
    )
async def get_http_client() -> httpx.AsyncClient:
    """Get or create shared HTTP client with connection pooling.

    The client is protected by an asyncio lock to prevent concurrent creation.
    If the existing client was closed (e.g., during shutdown), a new one is created.

    Returns:
        Singleton AsyncClient instance with connection pooling enabled
    """
    # NOTE(review): the diff hunk omitted one line here; a `global`
    # declaration is required for the assignments below — confirm it
    # matches the surrounding file.
    global _http_client

    async with _client_lock:
        if _http_client is None or _http_client.is_closed:
            _http_client = await _create_http_client()
            logger.info("Created shared HTTP client with connection pooling (20 keepalive, 100 max connections)")
        client = _http_client

    # Verify client is still open after releasing the lock.
    # In asyncio's cooperative model this is unlikely to fail, but guards
    # against edge cases where close_http_client runs between yield points.
    if client.is_closed:
        async with _client_lock:
            _http_client = await _create_http_client()
            client = _http_client
            logger.info("Re-created HTTP client after unexpected close")
    return client
async def close_http_client() -> None:
@@ -175,12 +223,12 @@ async def make_graphql_request(
response.raise_for_status() # Raise an exception for HTTP error codes 4xx/5xx
response_data = response.json()
if "errors" in response_data and response_data["errors"]:
if response_data.get("errors"):
error_details = "; ".join([err.get("message", str(err)) for err in response_data["errors"]])
# Check if this is an idempotent error that should be treated as success
if operation_context and operation_context.get('operation'):
operation = operation_context['operation']
if operation_context and operation_context.get("operation"):
operation = operation_context["operation"]
if is_idempotent_error(error_details, operation):
logger.warning(f"Idempotent operation '{operation}' - treating as success: {error_details}")
# Return a success response with the current state information
@@ -204,22 +252,7 @@ async def make_graphql_request(
raise ToolError(f"HTTP error {e.response.status_code}: {e.response.text}") from e
except httpx.RequestError as e:
logger.error(f"Request error occurred: {e}")
raise ToolError(f"Network connection error: {str(e)}") from e
raise ToolError(f"Network connection error: {e!s}") from e
except json.JSONDecodeError as e:
logger.error(f"Failed to decode JSON response: {e}")
raise ToolError(f"Invalid JSON response from Unraid API: {str(e)}") from e
def get_timeout_for_operation(operation_type: str = "default") -> httpx.Timeout:
    """Get appropriate timeout configuration for different operation types.

    Args:
        operation_type: Type of operation ('default', 'disk_operations')

    Returns:
        httpx.Timeout configuration appropriate for the operation
    """
    return DISK_TIMEOUT if operation_type == "disk_operations" else DEFAULT_TIMEOUT
raise ToolError(f"Invalid JSON response from Unraid API: {e!s}") from e