Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 0770b254b1 | |
| | e651e3324d | |
| | bbe0279af4 | |
| | 5e5e7b4f35 | |
| | 634f4326c6 | |
| | f54d3b3b7a | |
HISTORY.md (35 changed lines)
@@ -5,10 +5,45 @@ Changelog
(unreleased)
------------

Fix
~~~
- Add Ollama connection health details in UI, refs NOISSUE. [Simon
  Diesenreiter]


0.9.13 (2026-04-11)
-------------------

Fix
~~~
- Fix internal server error, refs NOISSUE. [Simon Diesenreiter]

Other
~~~~~


0.9.12 (2026-04-11)
-------------------

Fix
~~~
- Remove heuristic decision making fallbacks, refs NOISSUE. [Simon
  Diesenreiter]

Other
~~~~~


0.9.11 (2026-04-11)
-------------------

Fix
~~~
- Project association improvements, refs NOISSUE. [Simon Diesenreiter]

Other
~~~~~


0.9.10 (2026-04-11)
-------------------
@@ -1 +1 @@
0.9.11
0.9.14
@@ -2308,6 +2308,10 @@ class DatabaseManager:
if commit.get('remote_status') == 'pushed' or commit.get('imported_from_remote') or commit.get('commit_url')
]
has_pull_request = any(pr.get('pr_state') == 'open' and not pr.get('merged') for pr in pull_requests)
published_non_main_commits = [
commit for commit in published_commits
if (commit.get('branch') or '').strip() not in {'', 'main', 'master'}
]
if orphan_code_changes:
delivery_status = 'uncommitted'
delivery_message = (

@@ -2320,12 +2324,15 @@ class DatabaseManager:
f"{len(local_only_code_changes)} generated file change(s) were committed only in the local workspace. "
"No remote repo push was recorded for this prompt yet."
)
elif published_commits and repository and repository.get('mode') == 'project' and not has_pull_request:
elif published_non_main_commits and repository and repository.get('mode') == 'project' and not has_pull_request:
delivery_status = 'pushed_no_pr'
delivery_message = 'Changes were pushed to the remote repository, but no pull request is currently tracked for review.'
elif published_commits:
delivery_status = 'delivered'
delivery_message = 'Generated changes were published to the tracked repository and are reviewable through the recorded pull request.'
if has_pull_request:
delivery_message = 'Generated changes were published to the tracked repository and are reviewable through the recorded pull request.'
else:
delivery_message = 'Generated changes were published directly to the tracked repository default branch.'
else:
delivery_status = 'pending'
delivery_message = 'No git commit has been recorded for this project yet.'
@@ -3,6 +3,8 @@
from __future__ import annotations

import json
from urllib import error as urllib_error
from urllib import request as urllib_request

try:
from .gitea import GiteaAPI

@@ -297,6 +299,27 @@ class LLMServiceClient:
except Exception as exc:
return None, {'error': str(exc)}, str(exc)

@staticmethod
def extract_error_message(trace: dict | None) -> str | None:
"""Extract the most useful provider error message from a trace payload."""
if not isinstance(trace, dict):
return None
raw_response = trace.get('raw_response') if isinstance(trace.get('raw_response'), dict) else {}
provider_response = raw_response.get('provider_response') if isinstance(raw_response.get('provider_response'), dict) else {}
candidate_errors = [
provider_response.get('error'),
raw_response.get('error'),
trace.get('error'),
]
raw_responses = trace.get('raw_responses') if isinstance(trace.get('raw_responses'), list) else []
for payload in reversed(raw_responses):
if isinstance(payload, dict) and payload.get('error'):
candidate_errors.append(payload.get('error'))
for candidate in candidate_errors:
if candidate:
return str(candidate).strip()
return None

def _compose_system_prompt(self, stage: str, stage_prompt: str) -> str:
"""Merge the stage prompt with configured guardrails."""
sections = [stage_prompt.strip()] + self._guardrail_sections(stage)

@@ -391,4 +414,118 @@ class LLMServiceClient:
'tool_context_limit': settings.llm_tool_context_limit,
'max_tool_call_rounds': settings.llm_max_tool_call_rounds,
'gitea_live_tools_configured': bool(settings.gitea_url and settings.gitea_token),
}

def health_check_sync(self) -> dict:
"""Synchronously check Ollama reachability and configured model availability."""
if not self.ollama_url:
return {
'status': 'error',
'message': 'OLLAMA_URL is not configured.',
'ollama_url': 'Not configured',
'model': self.model,
'checks': [],
'suggestion': 'Set OLLAMA_URL to the reachable Ollama base URL.',
}

tags_url = f'{self.ollama_url}/api/tags'
try:
req = urllib_request.Request(tags_url, headers={'User-Agent': 'AI-Software-Factory'}, method='GET')
with urllib_request.urlopen(req, timeout=5) as resp:
raw_body = resp.read().decode('utf-8')
payload = json.loads(raw_body) if raw_body else {}
except urllib_error.HTTPError as exc:
body = exc.read().decode('utf-8') if exc.fp else ''
message = body or str(exc)
return {
'status': 'error',
'message': f'Ollama returned HTTP {exc.code}: {message}',
'ollama_url': self.ollama_url,
'model': self.model,
'checks': [
{
'name': 'api_tags',
'ok': False,
'status_code': exc.code,
'url': tags_url,
'message': message,
}
],
'suggestion': 'Verify OLLAMA_URL points to the Ollama service and that the API is reachable.',
}
except Exception as exc:
return {
'status': 'error',
'message': f'Unable to reach Ollama: {exc}',
'ollama_url': self.ollama_url,
'model': self.model,
'checks': [
{
'name': 'api_tags',
'ok': False,
'status_code': None,
'url': tags_url,
'message': str(exc),
}
],
'suggestion': 'Verify OLLAMA_URL resolves from the running factory process and that Ollama is listening on that address.',
}

models = payload.get('models') if isinstance(payload, dict) else []
model_names: list[str] = []
if isinstance(models, list):
for model_entry in models:
if not isinstance(model_entry, dict):
continue
name = str(model_entry.get('name') or model_entry.get('model') or '').strip()
if name:
model_names.append(name)

requested = (self.model or '').strip()
requested_base = requested.split(':', 1)[0]
model_available = any(
name == requested or name.startswith(f'{requested}:') or name.split(':', 1)[0] == requested_base
for name in model_names
)
checks = [
{
'name': 'api_tags',
'ok': True,
'status_code': 200,
'url': tags_url,
'message': f'Loaded {len(model_names)} model entries.',
},
{
'name': 'configured_model',
'ok': model_available,
'status_code': None,
'url': None,
'message': (
f'Configured model {requested} is available.'
if model_available else
f'Configured model {requested} was not found in Ollama tags.'
),
},
]
if model_available:
return {
'status': 'success',
'message': f'Ollama is reachable and model {requested} is available.',
'ollama_url': self.ollama_url,
'model': requested,
'model_available': True,
'model_count': len(model_names),
'models': model_names[:10],
'checks': checks,
}
return {
'status': 'error',
'message': f'Ollama is reachable, but model {requested} is not available.',
'ollama_url': self.ollama_url,
'model': requested,
'model_available': False,
'model_count': len(model_names),
'models': model_names[:10],
'checks': checks,
'suggestion': f'Pull or configure the model {requested}, or update OLLAMA_MODEL to a model that exists in Ollama.',
}
@@ -124,7 +124,9 @@ class AgentOrchestrator:
self.repo_name = repository.get('name') or self.repo_name
self.repo_url = repository.get('url') or self.repo_url
git_state = latest_ui.get('git') if isinstance(latest_ui.get('git'), dict) else {}
self.branch_name = git_state.get('active_branch') or self.branch_name
persisted_active_branch = git_state.get('active_branch')
if persisted_active_branch and persisted_active_branch not in {'main', 'master'}:
self.branch_name = persisted_active_branch
if self.prompt_text:
self.prompt_audit = self.db_manager.log_prompt_submission(
history_id=self.history.id,

@@ -168,38 +170,18 @@ class AgentOrchestrator:
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
}

def _fallback_generated_files(self) -> dict[str, str]:
"""Deterministic fallback files when LLM generation is unavailable."""
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
return {
"README.md": (
f"# {self.project_name}\n\n"
f"{self.description}\n\n"
"## Features\n"
f"{feature_section}\n\n"
"## Tech Stack\n"
f"{tech_section}\n"
),
"requirements.txt": "fastapi\nuvicorn\npytest\n",
"main.py": (
"from fastapi import FastAPI\n\n"
"app = FastAPI(title=\"Generated App\")\n\n"
"@app.get('/')\n"
"def read_root():\n"
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
),
"tests/test_app.py": (
"from main import read_root\n\n"
"def test_read_root():\n"
f" assert read_root()['name'] == '{self.project_name}'\n"
),
}

def _build_pr_branch_name(self, project_id: str) -> str:
"""Build a stable branch name used until the PR is merged."""
return f"ai/{project_id}"

def _should_use_pull_request_flow(self) -> bool:
"""Return whether this run should deliver changes through a PR branch."""
return self.existing_history is not None or self.active_pull_request is not None

def _delivery_branch_name(self) -> str:
"""Return the git branch used for the current delivery."""
return self.branch_name if self._should_use_pull_request_flow() else 'main'

def _extract_issue_number(self, prompt_text: str | None) -> int | None:
"""Extract an issue reference from prompt text."""
if not prompt_text:

@@ -216,7 +198,7 @@ class AgentOrchestrator:
"""Persist the current generation plan as an inspectable trace."""
if not self.db_manager or not self.history or not self.prompt_audit:
return
planned_files = list(self._static_files().keys()) + list(self._fallback_generated_files().keys())
planned_files = list(self._static_files().keys()) + ['README.md', 'requirements.txt', 'main.py', 'tests/test_app.py']
self.db_manager.log_llm_trace(
project_id=self.project_id,
history_id=self.history.id,

@@ -228,7 +210,7 @@ class AgentOrchestrator:
user_prompt=self.prompt_text or self.description,
assistant_response=(
f"Planned files: {', '.join(planned_files)}. "
f"Target branch: {self.branch_name}. "
f"Target branch: {self._delivery_branch_name()}. "
f"Repository mode: {self.ui_manager.ui_data.get('repository', {}).get('mode', 'unknown')}."
+ (
f" Linked issue: #{self.related_issue.get('number')} {self.related_issue.get('title')}."

@@ -239,7 +221,7 @@ class AgentOrchestrator:
'planned_files': planned_files,
'features': list(self.features),
'tech_stack': list(self.tech_stack),
'branch': self.branch_name,
'branch': self._delivery_branch_name(),
'repository': self.ui_manager.ui_data.get('repository', {}),
'related_issue': self.related_issue,
},

@@ -335,7 +317,6 @@ class AgentOrchestrator:

async def _generate_prompt_driven_files(self) -> tuple[dict[str, str], dict | None, bool]:
"""Use the configured LLM to generate prompt-specific project files."""
fallback_files = self._fallback_generated_files()
workspace_context = self._collect_workspace_context()
has_existing_files = bool(workspace_context.get('has_existing_files'))
if has_existing_files:

@@ -410,10 +391,19 @@ class AgentOrchestrator:
f"raw={raw_generated_paths or []}; accepted={accepted_paths or []}; rejected={rejected_paths or []}; "
f"existing_workspace={has_existing_files}",
)
if has_existing_files:
return generated_files, trace, True
merged_files = {**fallback_files, **generated_files}
return merged_files, trace, False
if not content:
detail = LLMServiceClient.extract_error_message(trace)
if detail:
raise RuntimeError(f'LLM code generation failed: {detail}')
raise RuntimeError('LLM code generation did not return a usable response.')
if not generated_files:
raise RuntimeError('LLM code generation did not return any writable files.')
if not has_existing_files:
required_files = {'README.md', 'requirements.txt', 'main.py', 'tests/test_app.py'}
missing_files = sorted(required_files - set(generated_files))
if missing_files:
raise RuntimeError(f"LLM code generation omitted required starter files: {', '.join(missing_files)}")
return generated_files, trace, has_existing_files

async def _sync_issue_context(self) -> None:
"""Sync repository issues and resolve a linked issue from the prompt when present."""

@@ -573,11 +563,15 @@ class AgentOrchestrator:
self.ui_manager.ui_data.setdefault('git', {})['remote_error'] = str(exc)
self._append_log(f'Initial main push skipped: {exc}')

if self.git_manager.branch_exists(self.branch_name):
self.git_manager.checkout_branch(self.branch_name)
delivery_branch = self._delivery_branch_name()
if self._should_use_pull_request_flow():
if self.git_manager.branch_exists(self.branch_name):
self.git_manager.checkout_branch(self.branch_name)
else:
self.git_manager.checkout_branch(self.branch_name, create=True, start_point='main')
else:
self.git_manager.checkout_branch(self.branch_name, create=True, start_point='main')
self.ui_manager.ui_data.setdefault('git', {})['active_branch'] = self.branch_name
self.git_manager.checkout_branch('main')
self.ui_manager.ui_data.setdefault('git', {})['active_branch'] = delivery_branch

async def _ensure_pull_request(self) -> dict | None:
"""Create the project pull request on first delivery and reuse it later."""

@@ -642,16 +636,17 @@ class AgentOrchestrator:
repository = self.ui_manager.ui_data.get("repository") or {}
if not self._repository_supports_remote_delivery(repository):
return None
push_result = await self._push_branch(self.branch_name)
delivery_branch = self._delivery_branch_name()
push_result = await self._push_branch(delivery_branch)
if push_result is None:
return None
pull_request = await self._ensure_pull_request()
pull_request = await self._ensure_pull_request() if self._should_use_pull_request_flow() else None
commit_url = self.gitea_api.build_commit_url(commit_hash, owner=self.repo_owner, repo=self.repo_name)
compare_url = self.gitea_api.build_compare_url(base_commit, commit_hash, owner=self.repo_owner, repo=self.repo_name) if base_commit else None
remote_record = {
"status": "pushed",
"remote": push_result.get('remote'),
"branch": self.branch_name,
"branch": delivery_branch,
"commit_url": commit_url,
"compare_url": compare_url,
"changed_files": changed_files,

@@ -661,7 +656,10 @@ class AgentOrchestrator:
repository["last_commit_url"] = commit_url
if compare_url:
repository["last_compare_url"] = compare_url
self._append_log(f"Pushed generated commit to {self.repo_owner}/{self.repo_name}.")
if pull_request:
self._append_log(f"Pushed generated commit to {self.repo_owner}/{self.repo_name} and updated the delivery pull request.")
else:
self._append_log(f"Pushed generated commit directly to {self.repo_owner}/{self.repo_name} on {delivery_branch}.")
return remote_record

def _build_diff_text(self, relative_path: str, previous_content: str, new_content: str) -> str:

@@ -845,7 +843,7 @@ class AgentOrchestrator:
self._write_file(relative_path, content)
if editing_existing_workspace and len(self.pending_code_changes) == change_count_before:
raise RuntimeError('The LLM response did not produce any file changes for the existing project.')
fallback_used = bool(trace and trace.get('fallback_used')) or trace is None
fallback_used = bool(trace and trace.get('fallback_used'))
if self.db_manager and self.history and self.prompt_audit and trace:
self.db_manager.log_llm_trace(
project_id=self.project_id,

@@ -860,10 +858,7 @@ class AgentOrchestrator:
raw_response=trace.get('raw_response'),
fallback_used=fallback_used,
)
if fallback_used:
self._append_log('LLM code generation was unavailable; used deterministic scaffolding fallback.')
else:
self._append_log('Application files generated from the prompt with the configured LLM.')
self._append_log('Application files generated from the prompt with the configured LLM.')

async def _run_tests(self) -> None:
"""Run tests for the generated code."""

@@ -927,7 +922,7 @@ class AgentOrchestrator:
"files": unique_files,
"timestamp": datetime.utcnow().isoformat(),
"scope": "local",
"branch": self.branch_name,
"branch": self._delivery_branch_name(),
}
git_debug.update({
'commit_status': 'committed',
@@ -28,14 +28,6 @@ class RequestInterpreter:
GENERIC_PROJECT_NAME_WORDS = {
'app', 'application', 'harness', 'platform', 'project', 'purpose', 'service', 'solution', 'suite', 'system', 'test', 'tool',
}
PLACEHOLDER_PROJECT_NAME_WORDS = {
'generated project', 'new project', 'project', 'temporary name', 'temp name', 'placeholder', 'untitled project',
}
ROUTING_STOPWORDS = REPO_NOISE_WORDS | GENERIC_PROJECT_NAME_WORDS | {
'about', 'after', 'again', 'appropriate', 'before', 'best', 'details', 'follow', 'following', 'implement',
'integration', 'instance', 'instances', 'later', 'make', 'now', 'primary', 'primarily', 'probably',
'remember', 'specific', 'suite', 'tearing', 'testing', 'through', 'used', 'using', 'workflow', 'workflows',
}

def __init__(self, ollama_url: str | None = None, model: str | None = None):
self.ollama_url = (ollama_url or settings.ollama_url).rstrip('/')

@@ -96,47 +88,34 @@ class RequestInterpreter:
},
expect_json=True,
)
if content:
try:
parsed = json.loads(content)
interpreted = self._normalize_interpreted_request(parsed, normalized)
routing = self._normalize_routing(parsed.get('routing'), interpreted, compact_context)
if routing.get('intent') == 'continue_project' and routing.get('project_name'):
interpreted['name'] = routing['project_name']
naming_trace = None
if routing.get('intent') == 'new_project':
interpreted, routing, naming_trace = await self._refine_new_project_identity(
prompt_text=normalized,
interpreted=interpreted,
routing=routing,
context=compact_context,
)
trace['routing'] = routing
trace['context_excerpt'] = compact_context
if naming_trace is not None:
trace['project_naming'] = naming_trace
return interpreted, trace
except Exception:
pass
if not content:
detail = self.llm_client.extract_error_message(trace)
if detail:
raise RuntimeError(f'LLM request interpretation failed: {detail}')
raise RuntimeError('LLM request interpretation did not return a usable response.')

interpreted, routing = self._heuristic_fallback(normalized, compact_context)
try:
parsed = json.loads(content)
except Exception as exc:
raise RuntimeError('LLM request interpretation did not return valid JSON.') from exc

interpreted = self._normalize_interpreted_request(parsed)
routing = self._normalize_routing(parsed.get('routing'), interpreted, compact_context)
if routing.get('intent') == 'continue_project' and routing.get('project_name'):
interpreted['name'] = routing['project_name']
naming_trace = None
if routing.get('intent') == 'new_project':
constraints = await self._collect_project_identity_constraints(compact_context)
routing['repo_name'] = self._ensure_unique_repo_name(routing.get('repo_name') or interpreted.get('name') or 'project', constraints['repo_names'])
return interpreted, {
'stage': 'request_interpretation',
'provider': 'heuristic',
'model': self.model,
'system_prompt': system_prompt,
'user_prompt': user_prompt,
'assistant_response': json.dumps({'request': interpreted, 'routing': routing}),
'raw_response': {'fallback': 'heuristic', 'llm_trace': trace.get('raw_response') if isinstance(trace, dict) else None},
'routing': routing,
'context_excerpt': compact_context,
'guardrails': trace.get('guardrails') if isinstance(trace, dict) else [],
'tool_context': trace.get('tool_context') if isinstance(trace, dict) else [],
'fallback_used': True,
}
interpreted, routing, naming_trace = await self._refine_new_project_identity(
prompt_text=normalized,
interpreted=interpreted,
routing=routing,
context=compact_context,
)
trace['routing'] = routing
trace['context_excerpt'] = compact_context
if naming_trace is not None:
trace['project_naming'] = naming_trace
return interpreted, trace

async def _refine_new_project_identity(
self,

@@ -164,25 +143,22 @@ class RequestInterpreter:
},
expect_json=True,
)
if content:
try:
fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
parsed = json.loads(content)
project_name, repo_name = self._normalize_project_identity(
parsed,
fallback_name=fallback_name,
)
repo_name = self._ensure_unique_repo_name(repo_name, constraints['repo_names'])
interpreted['name'] = project_name
routing['project_name'] = project_name
routing['repo_name'] = repo_name
return interpreted, routing, trace
except Exception:
pass
if not content:
detail = self.llm_client.extract_error_message(trace)
if detail:
raise RuntimeError(f'LLM project naming failed: {detail}')
raise RuntimeError('LLM project naming did not return a usable response.')

fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
routing['project_name'] = fallback_name
routing['repo_name'] = self._ensure_unique_repo_name(self._derive_repo_name(fallback_name), constraints['repo_names'])
try:
parsed = json.loads(content)
except Exception as exc:
raise RuntimeError('LLM project naming did not return valid JSON.') from exc

project_name, repo_name = self._normalize_project_identity(parsed)
repo_name = self._ensure_unique_repo_name(repo_name, constraints['repo_names'])
interpreted['name'] = project_name
routing['project_name'] = project_name
routing['repo_name'] = repo_name
return interpreted, routing, trace

async def _collect_project_identity_constraints(self, context: dict) -> dict[str, set[str]]:

@@ -212,17 +188,19 @@ class RequestInterpreter:
return set()
return {str(repo.get('name')).strip() for repo in repos if repo.get('name')}

def _normalize_interpreted_request(self, interpreted: dict, original_prompt: str) -> dict:
def _normalize_interpreted_request(self, interpreted: dict) -> dict:
"""Normalize LLM output into the required request shape."""
request_payload = interpreted.get('request') if isinstance(interpreted.get('request'), dict) else interpreted
name = str(interpreted.get('name') or '').strip() or self._derive_name(original_prompt)
if isinstance(request_payload, dict):
name = str(request_payload.get('name') or '').strip() or self._derive_name(original_prompt)
description = str((request_payload or {}).get('description') or '').strip() or original_prompt[:255]
features = self._normalize_list((request_payload or {}).get('features'))
tech_stack = self._normalize_list((request_payload or {}).get('tech_stack'))
if not features:
features = ['core workflow based on free-form request']
if not isinstance(request_payload, dict):
raise RuntimeError('LLM request interpretation did not include a request object.')
name = str(request_payload.get('name') or '').strip()
description = str(request_payload.get('description') or '').strip()
if not name:
raise RuntimeError('LLM request interpretation did not provide a project name.')
if not description:
raise RuntimeError('LLM request interpretation did not provide a project description.')
features = self._normalize_list(request_payload.get('features'))
tech_stack = self._normalize_list(request_payload.get('tech_stack'))
return {
'name': name[:255],
'description': description[:255],

@@ -256,6 +234,9 @@ class RequestInterpreter:
def _normalize_routing(self, routing: dict | None, interpreted: dict, context: dict) -> dict:
"""Normalize routing metadata returned by the LLM."""
routing = routing or {}
intent = str(routing.get('intent') or '').strip()
if intent not in {'new_project', 'continue_project'}:
raise RuntimeError('LLM request interpretation did not provide a valid routing intent.')
project_id = routing.get('project_id')
project_name = routing.get('project_name')
issue_number = routing.get('issue_number')

@@ -264,33 +245,32 @@ class RequestInterpreter:
elif isinstance(issue_number, str) and issue_number.isdigit():
issue_number = int(issue_number)
matched_project = None
for project in context.get('projects', []):
if project_id and project.get('project_id') == project_id:
matched_project = project
break
if project_name and project.get('name') == project_name:
matched_project = project
break
intent = str(routing.get('intent') or '').strip() or ('continue_project' if matched_project else 'new_project')
if matched_project is None and intent == 'continue_project':
recent_chat_history = context.get('recent_chat_history', [])
recent_project_id = recent_chat_history[0].get('project_id') if recent_chat_history else None
if recent_project_id:
matched_project = next(
(project for project in context.get('projects', []) if project.get('project_id') == recent_project_id),
None,
)
if intent == 'continue_project':
for project in context.get('projects', []):
if project_id and project.get('project_id') == project_id:
matched_project = project
break
if project_name and project.get('name') == project_name:
matched_project = project
break
elif project_id:
matched_project = next(
(project for project in context.get('projects', []) if project.get('project_id') == project_id),
None,
)
if intent == 'continue_project' and matched_project is None:
raise RuntimeError('LLM selected continue_project without identifying a tracked project from prompt history.')
if intent == 'new_project' and matched_project is not None:
raise RuntimeError('LLM selected new_project while also pointing at an existing tracked project.')
normalized = {
'intent': intent,
'project_id': matched_project.get('project_id') if matched_project else project_id,
'project_name': matched_project.get('name') if matched_project else (project_name or interpreted.get('name')),
'repo_name': routing.get('repo_name') if intent == 'new_project' else None,
'repo_name': str(routing.get('repo_name') or '').strip() or None if intent == 'new_project' else None,
'issue_number': issue_number,
'confidence': routing.get('confidence') or ('medium' if matched_project else 'low'),
'reasoning_summary': routing.get('reasoning_summary') or ('Matched prior project context' if matched_project else 'No strong prior project match found'),
'confidence': routing.get('confidence') or 'medium',
'reasoning_summary': routing.get('reasoning_summary') or '',
}
if normalized['intent'] == 'new_project' and not normalized['repo_name']:
normalized['repo_name'] = self._derive_repo_name(normalized['project_name'] or interpreted.get('name') or 'Generated Project')
return normalized

def _normalize_list(self, value) -> list[str]:

@@ -300,42 +280,6 @@ class RequestInterpreter:
return [item.strip() for item in value.split(',') if item.strip()]
return []

def _derive_name(self, prompt_text: str) -> str:
"""Derive a stable project name when the LLM does not provide one."""
first_line = prompt_text.splitlines()[0].strip()
quoted = re.search(r'["\']([^"\']{3,80})["\']', first_line)
if quoted:
return self._humanize_name(quoted.group(1))

noun_phrase = re.search(
r'(?:build|create|start|make|develop|generate|design|need|want)\s+'
r'(?:me\s+|us\s+|an?\s+|the\s+|new\s+|internal\s+|simple\s+|lightweight\s+|modern\s+|web\s+|mobile\s+)*'
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
first_line,
flags=re.IGNORECASE,
)
if noun_phrase:
return self._humanize_name(noun_phrase.group(1))

focused_phrase = re.search(
r'(?:purpose\s+is\s+to\s+create\s+(?:an?\s+)?)'
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
first_line,
flags=re.IGNORECASE,
)
if focused_phrase:
return self._humanize_name(focused_phrase.group(1))

cleaned = re.sub(r'[^A-Za-z0-9 ]+', ' ', first_line)
stopwords = {
'build', 'create', 'start', 'make', 'develop', 'generate', 'design', 'need', 'want', 'please', 'for', 'our', 'with', 'that', 'this',
'new', 'internal', 'simple', 'modern', 'web', 'mobile', 'app', 'application', 'tool', 'system',
}
tokens = [word for word in cleaned.split() if word and word.lower() not in stopwords]
if tokens:
return self._humanize_name(' '.join(tokens[:4]))
return 'Generated Project'

def _humanize_name(self, raw_name: str) -> str:
"""Normalize a candidate project name into a readable title."""
cleaned = re.sub(r'[^A-Za-z0-9\s-]+', ' ', raw_name).strip(' -')

@@ -412,15 +356,6 @@ class RequestInterpreter:
return False
return True

def _preferred_project_name_fallback(self, prompt_text: str, interpreted_name: str | None) -> str:
"""Pick the best fallback title when the earlier interpretation produced a placeholder."""
interpreted_clean = self._humanize_name(str(interpreted_name or '').strip()) if interpreted_name else ''
normalized_interpreted = interpreted_clean.lower()
if normalized_interpreted and normalized_interpreted not in self.PLACEHOLDER_PROJECT_NAME_WORDS:
if not (len(normalized_interpreted.split()) == 1 and normalized_interpreted in self.GENERIC_PROJECT_NAME_WORDS):
return interpreted_clean
return self._derive_name(prompt_text)

def _ensure_unique_repo_name(self, repo_name: str, reserved_names: set[str]) -> str:
"""Choose a repository slug that does not collide with tracked or remote repositories."""
base_name = self._derive_repo_name(repo_name)

@@ -431,101 +366,19 @@ class RequestInterpreter:
suffix += 1
return f'{base_name}-{suffix}'

def _normalize_project_identity(self, payload: dict, fallback_name: str) -> tuple[str, str]:
"""Normalize model-proposed project and repository naming."""
fallback_project_name = self._humanize_name(str(fallback_name or 'Generated Project'))
def _normalize_project_identity(self, payload: dict) -> tuple[str, str]:
"""Validate model-proposed project and repository naming."""
project_candidate = str(payload.get('project_name') or payload.get('name') or '').strip()
project_name = fallback_project_name
if project_candidate and self._should_use_project_name_candidate(project_candidate, fallback_project_name):
project_name = self._humanize_name(project_candidate)
repo_candidate = str(payload.get('repo_name') or '').strip()
repo_name = self._derive_repo_name(project_name)
if repo_candidate and self._should_use_repo_name_candidate(repo_candidate, project_name):
repo_name = self._derive_repo_name(repo_candidate)
return project_name, repo_name

def _heuristic_fallback(self, prompt_text: str, context: dict | None = None) -> tuple[dict, dict]:
"""Fallback request extraction when Ollama is unavailable."""
lowered = prompt_text.lower()
tech_candidates = [
'python', 'fastapi', 'django', 'flask', 'postgresql', 'sqlite', 'react', 'vue', 'nicegui', 'docker'
]
tech_stack = [candidate for candidate in tech_candidates if candidate in lowered]
sentences = [part.strip() for part in re.split(r'[\n\.]+', prompt_text) if part.strip()]
features = sentences[:3] or ['Implement the user request from free-form text']
interpreted = {
'name': self._derive_name(prompt_text),
'description': sentences[0][:255] if sentences else prompt_text[:255],
'features': features,
'tech_stack': tech_stack,
}
routing = self._heuristic_routing(prompt_text, context or {})
if routing.get('project_name'):
interpreted['name'] = routing['project_name']
return interpreted, routing

def _heuristic_routing(self, prompt_text: str, context: dict) -> dict:
"""Best-effort routing when the LLM is unavailable."""
lowered = prompt_text.lower()
explicit_new = any(token in lowered for token in ['new project', 'start a new project', 'create a new project', 'build a new app'])
referenced_issue = self._extract_issue_number(prompt_text)
recent_history = context.get('recent_chat_history', [])
projects = context.get('projects', [])
last_project_id = recent_history[0].get('project_id') if recent_history else None
last_issue = ((recent_history[0].get('related_issue') or {}).get('number') if recent_history else None)
last_project = next((project for project in projects if project.get('project_id') == last_project_id), None) if last_project_id else None

matched_project = None
for project in projects:
name = (project.get('name') or '').lower()
repo = ((project.get('repository') or {}).get('name') or '').lower()
if name and name in lowered:
matched_project = project
break
if repo and repo in lowered:
matched_project = project
break
if matched_project is None and not explicit_new:
follow_up_tokens = ['also', 'continue', 'for this project', 'for that project', 'work on this', 'work on that', 'fix that', 'add this']
leading_follow_up = lowered.startswith(('also', 'now', 'continue', 'remember', 'then'))
recent_overlap = 0
if last_project is not None:
recent_prompt_text = recent_history[0].get('prompt_text') or ''
project_reference_text = ' '.join(
part for part in [
last_project.get('name') or '',
last_project.get('description') or '',
((last_project.get('repository') or {}).get('name') or ''),
]
if part
)
recent_overlap = len(
self._routing_tokens(prompt_text)
& (self._routing_tokens(recent_prompt_text) | self._routing_tokens(project_reference_text))
)
if last_project_id and (leading_follow_up or any(token in lowered for token in follow_up_tokens) or recent_overlap >= 2):
matched_project = last_project
issue_number = referenced_issue
if issue_number is None and any(token in lowered for token in ['that issue', 'this issue', 'the issue']) and last_issue is not None:
issue_number = last_issue
intent = 'new_project' if explicit_new or matched_project is None else 'continue_project'
return {
'intent': intent,
'project_id': matched_project.get('project_id') if matched_project else None,
'project_name': matched_project.get('name') if matched_project else self._derive_name(prompt_text),
'repo_name': None if matched_project else self._derive_repo_name(self._derive_name(prompt_text)),
'issue_number': issue_number,
'confidence': 'medium' if matched_project or explicit_new else 'low',
'reasoning_summary': 'Heuristic routing from chat history and project names.',
}

def _routing_tokens(self, text: str) -> set[str]:
"""Extract meaningful tokens for heuristic continuation matching."""
cleaned = re.sub(r'[^a-z0-9]+', ' ', (text or '').lower())
return {
token for token in cleaned.split()
if len(token) >= 4 and token not in self.ROUTING_STOPWORDS
}
if not project_candidate:
raise RuntimeError('LLM project naming did not provide a project name.')
if not repo_candidate:
raise RuntimeError('LLM project naming did not provide a repository slug.')
if not self._should_use_project_name_candidate(project_candidate, project_candidate):
raise RuntimeError('LLM project naming returned an unusable project name.')
if not self._should_use_repo_name_candidate(repo_candidate, project_candidate):
raise RuntimeError('LLM project naming returned an unusable repository slug.')
return self._humanize_name(project_candidate), self._derive_repo_name(repo_candidate)

def _extract_issue_number(self, prompt_text: str) -> int | None:
match = re.search(r'(?:#|issue\s+)(\d+)', prompt_text, flags=re.IGNORECASE)
@@ -725,6 +725,20 @@ def _load_home_assistant_health_snapshot() -> dict:
}


def _load_ollama_health_snapshot() -> dict:
"""Load an Ollama health snapshot for UI rendering."""
try:
return LLMServiceClient().health_check_sync()
except Exception as exc:
return {
'status': 'error',
'message': f'Unable to run Ollama health checks: {exc}',
'ollama_url': settings.ollama_url or 'Not configured',
'model': settings.OLLAMA_MODEL,
'checks': [],
}


def _add_dashboard_styles() -> None:
"""Register shared dashboard styles."""
ui.add_head_html(

@@ -821,6 +835,7 @@ def _render_confirmation_dialog(title: str, message: str, confirm_label: str, on
def _render_health_panels() -> None:
"""Render application, integration, and queue health panels."""
runtime = get_database_runtime_summary()
ollama_health = _load_ollama_health_snapshot()
n8n_health = _load_n8n_health_snapshot()
gitea_health = _load_gitea_health_snapshot()
home_assistant_health = _load_home_assistant_health_snapshot()

@@ -843,6 +858,33 @@ def _render_health_panels() -> None:
ui.label(label).classes('factory-muted')
ui.label(str(value)).style('font-weight: 600; color: #3a281a;')

with ui.card().classes('factory-panel q-pa-lg'):
ui.label('Ollama / LLM').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
ui.label(ollama_health.get('status', 'unknown').upper()).classes('factory-chip')
ui.label(ollama_health.get('message', 'No Ollama status available.')).classes('factory-muted q-mt-sm')
rows = [
('Ollama URL', ollama_health.get('ollama_url') or 'Not configured'),
('Configured Model', ollama_health.get('model') or 'Not configured'),
('Model Available', 'yes' if ollama_health.get('model_available') else 'no'),
('Visible Models', ollama_health.get('model_count') if ollama_health.get('model_count') is not None else 'unknown'),
]
for label, value in rows:
with ui.row().classes('justify-between w-full q-mt-sm'):
ui.label(label).classes('factory-muted')
ui.label(str(value)).style('font-weight: 600; color: #3a281a;')
if ollama_health.get('models'):
ui.label('Reported Models').style('font-size: 1rem; font-weight: 700; color: #3a281a; margin-top: 12px;')
ui.label(', '.join(str(model) for model in ollama_health.get('models', []))).classes('factory-muted')
if ollama_health.get('suggestion'):
ui.label(ollama_health['suggestion']).classes('factory-chip q-mt-md')
for check in ollama_health.get('checks', []):
status = 'OK' if check.get('ok') else 'FAIL'
ui.markdown(
f"- **{escape(check.get('name', 'check'))}** · {status} · {escape(str(check.get('status_code') or 'n/a'))} · {escape(check.get('url') or 'unknown url')}"
)
if check.get('message'):
ui.label(check['message']).classes('factory-muted')

with ui.card().classes('factory-panel q-pa-lg'):
ui.label('n8n Connection Status').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
status_label = n8n_health.get('status', 'unknown').upper()

@@ -930,7 +972,7 @@ def create_health_page() -> None:
with ui.row().classes('items-center justify-between w-full'):
with ui.column().classes('gap-1'):
ui.label('Factory Health').style('font-size: 2rem; font-weight: 800; color: #302116;')
ui.label('Current application and n8n connectivity diagnostics.').classes('factory-muted')
ui.label('Current application, Ollama, and integration connectivity diagnostics.').classes('factory-muted')
with ui.row().classes('items-center gap-2'):
ui.link('Back to Dashboard', '/')
ui.link('Refresh Health', '/health-ui')
@@ -187,7 +187,6 @@ async def _derive_project_id_for_request(
) -> tuple[str, dict | None]:
"""Derive a stable project id for a newly created project."""
reserved_ids = {str(project.get('project_id')).strip() for project in existing_projects if project.get('project_id')}
fallback_id = _ensure_unique_identifier((prompt_routing or {}).get('project_name') or request.name, reserved_ids)
user_prompt = (
f"Original user prompt:\n{prompt_text}\n\n"
f"Structured request:\n{json.dumps({'name': request.name, 'description': request.description, 'features': request.features, 'tech_stack': request.tech_stack}, indent=2)}\n\n"

@@ -202,14 +201,19 @@ async def _derive_project_id_for_request(
tool_context_input={'projects': existing_projects},
expect_json=True,
)
if content:
try:
parsed = json.loads(content)
candidate = parsed.get('project_id') or parsed.get('slug') or request.name
return _ensure_unique_identifier(str(candidate), reserved_ids), trace
except Exception:
pass
return fallback_id, trace
if not content:
detail = LLMServiceClient.extract_error_message(trace)
if detail:
raise RuntimeError(f'LLM project id naming failed: {detail}')
raise RuntimeError('LLM project id naming did not return a usable response.')
try:
parsed = json.loads(content)
except Exception as exc:
raise RuntimeError('LLM project id naming did not return valid JSON.') from exc
candidate = str(parsed.get('project_id') or parsed.get('slug') or '').strip()
if not candidate:
raise RuntimeError('LLM project id naming did not provide a project id.')
return _ensure_unique_identifier(candidate, reserved_ids), trace


def _serialize_project(history: ProjectHistory) -> dict:

@@ -282,6 +286,51 @@ def _compose_prompt_text(request: SoftwareRequest) -> str:
)


def _generation_error_payload(
*,
message: str,
request: SoftwareRequest | None = None,
source: dict | None = None,
interpreted_request: dict | None = None,
routing: dict | None = None,
) -> dict:
"""Return a workflow-safe JSON payload for expected generation failures."""
response = {
'status': 'error',
'message': message,
'error': message,
'summary_message': message,
'summary_metadata': {
'provider': None,
'model': None,
'fallback_used': False,
},
'data': {
'history_id': None,
'project_id': None,
'name': request.name if request is not None else (interpreted_request or {}).get('name'),
'description': request.description if request is not None else (interpreted_request or {}).get('description'),
'status': 'error',
'progress': 0,
'message': message,
'current_step': None,
'error_message': message,
'logs': [],
'changed_files': [],
'repository': None,
'pull_request': None,
'summary_message': message,
},
}
if source is not None:
response['source'] = source
if interpreted_request is not None:
response['interpreted_request'] = interpreted_request
if routing is not None:
response['routing'] = routing
return response


async def _run_generation(
request: SoftwareRequest,
db: Session,

@@ -519,6 +568,11 @@ def _get_home_assistant_health() -> dict:
return _create_home_assistant_agent().health_check_sync()


def _get_ollama_health() -> dict:
"""Return current Ollama connectivity diagnostics."""
return LLMServiceClient().health_check_sync()


async def _get_queue_gate_status(force: bool = False) -> dict:
"""Return whether queued prompts may be processed now."""
if not database_module.settings.prompt_queue_enabled:

@@ -802,6 +856,7 @@ def health_check():
'database_target': runtime['target'],
'database_name': runtime['database'],
'integrations': {
'ollama': _get_ollama_health(),
'gitea': _get_gitea_health(),
'home_assistant': _get_home_assistant_health(),
},

@@ -875,7 +930,15 @@ def reset_runtime_setting(setting_key: str, db: DbSession):
@app.post('/generate')
async def generate_software(request: SoftwareRequest, db: DbSession):
"""Create and record a software-generation request."""
return await _run_generation(request, db)
try:
return await _run_generation(request, db)
except Exception as exc:
DatabaseManager(db).log_system_event(
component='api',
level='ERROR',
message=f"Structured generation failed: {exc}",
)
return _generation_error_payload(message=str(exc), request=request)


@app.post('/generate/text')

@@ -919,7 +982,22 @@ async def generate_software_from_text(request: FreeformSoftwareRequest, db: DbSe
},
}

return await _run_freeform_generation(request, db)
try:
return await _run_freeform_generation(request, db)
except Exception as exc:
DatabaseManager(db).log_system_event(
component='api',
level='ERROR',
message=f"Free-form generation failed for source={request.source}: {exc}",
)
return _generation_error_payload(
message=str(exc),
source={
'type': request.source,
'chat_id': request.chat_id,
'chat_type': request.chat_type,
},
)


@app.get('/queue')