|
|
|
|
@@ -3,9 +3,11 @@
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
|
|
|
|
import difflib
|
|
|
|
|
import json
|
|
|
|
|
import py_compile
|
|
|
|
|
import re
|
|
|
|
|
import subprocess
|
|
|
|
|
from pathlib import PurePosixPath
|
|
|
|
|
from typing import Optional
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
|
|
|
|
|
@@ -14,18 +16,27 @@ try:
|
|
|
|
|
from .database_manager import DatabaseManager
|
|
|
|
|
from .git_manager import GitManager
|
|
|
|
|
from .gitea import GiteaAPI
|
|
|
|
|
from .llm_service import LLMServiceClient
|
|
|
|
|
from .ui_manager import UIManager
|
|
|
|
|
except ImportError:
|
|
|
|
|
from config import settings
|
|
|
|
|
from agents.database_manager import DatabaseManager
|
|
|
|
|
from agents.git_manager import GitManager
|
|
|
|
|
from agents.gitea import GiteaAPI
|
|
|
|
|
from agents.llm_service import LLMServiceClient
|
|
|
|
|
from agents.ui_manager import UIManager
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AgentOrchestrator:
|
|
|
|
|
"""Orchestrates the software generation process with full audit trail."""
|
|
|
|
|
|
|
|
|
|
REMOTE_READY_REPOSITORY_MODES = {'project', 'onboarded'}
|
|
|
|
|
REMOTE_READY_REPOSITORY_STATUSES = {'created', 'exists', 'ready', 'onboarded'}
|
|
|
|
|
GENERATED_TEXT_FILE_SUFFIXES = {'.py', '.md', '.txt', '.toml', '.yaml', '.yml', '.json', '.ini', '.cfg', '.sh', '.html', '.css', '.js', '.ts'}
|
|
|
|
|
GENERATED_TEXT_FILE_NAMES = {'README', 'README.md', '.gitignore', 'requirements.txt', 'pyproject.toml', 'Dockerfile', 'Containerfile', 'Makefile'}
|
|
|
|
|
MAX_WORKSPACE_CONTEXT_FILES = 20
|
|
|
|
|
MAX_WORKSPACE_CONTEXT_CHARS = 24000
|
|
|
|
|
|
|
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
project_id: str,
|
|
|
|
|
@@ -77,6 +88,7 @@ class AgentOrchestrator:
|
|
|
|
|
self.branch_name = self._build_pr_branch_name(project_id)
|
|
|
|
|
self.active_pull_request = None
|
|
|
|
|
self._gitea_username: str | None = None
|
|
|
|
|
existing_repository: dict | None = None
|
|
|
|
|
hinted_issue_number = (related_issue_hint or {}).get('number') if related_issue_hint else None
|
|
|
|
|
self.related_issue_number = hinted_issue_number if hinted_issue_number is not None else self._extract_issue_number(prompt_text)
|
|
|
|
|
self.related_issue: dict | None = DatabaseManager._normalize_issue(related_issue_hint)
|
|
|
|
|
@@ -107,9 +119,12 @@ class AgentOrchestrator:
|
|
|
|
|
latest_ui = self.db_manager._get_latest_ui_snapshot_data(self.history.id)
|
|
|
|
|
repository = latest_ui.get('repository') if isinstance(latest_ui, dict) else None
|
|
|
|
|
if isinstance(repository, dict) and repository:
|
|
|
|
|
existing_repository = dict(repository)
|
|
|
|
|
self.repo_owner = repository.get('owner') or self.repo_owner
|
|
|
|
|
self.repo_name = repository.get('name') or self.repo_name
|
|
|
|
|
self.repo_url = repository.get('url') or self.repo_url
|
|
|
|
|
git_state = latest_ui.get('git') if isinstance(latest_ui.get('git'), dict) else {}
|
|
|
|
|
self.branch_name = git_state.get('active_branch') or self.branch_name
|
|
|
|
|
if self.prompt_text:
|
|
|
|
|
self.prompt_audit = self.db_manager.log_prompt_submission(
|
|
|
|
|
history_id=self.history.id,
|
|
|
|
|
@@ -126,18 +141,60 @@ class AgentOrchestrator:
|
|
|
|
|
self.ui_manager.ui_data["project_root"] = str(self.project_root)
|
|
|
|
|
self.ui_manager.ui_data["features"] = list(self.features)
|
|
|
|
|
self.ui_manager.ui_data["tech_stack"] = list(self.tech_stack)
|
|
|
|
|
self.ui_manager.ui_data["repository"] = {
|
|
|
|
|
repository_ui = {
|
|
|
|
|
"owner": self.repo_owner,
|
|
|
|
|
"name": self.repo_name,
|
|
|
|
|
"mode": "project" if settings.use_project_repositories else "shared",
|
|
|
|
|
"status": "pending" if settings.use_project_repositories else "shared",
|
|
|
|
|
"provider": "gitea",
|
|
|
|
|
}
|
|
|
|
|
if existing_repository:
|
|
|
|
|
repository_ui.update(existing_repository)
|
|
|
|
|
self.ui_manager.ui_data["repository"] = repository_ui
|
|
|
|
|
if self.related_issue:
|
|
|
|
|
self.ui_manager.ui_data["related_issue"] = self.related_issue
|
|
|
|
|
if self.active_pull_request:
|
|
|
|
|
self.ui_manager.ui_data["pull_request"] = self.active_pull_request
|
|
|
|
|
|
|
|
|
|
def _repository_supports_remote_delivery(self, repository: dict | None = None) -> bool:
|
|
|
|
|
"""Return whether repository metadata supports git push and PR delivery."""
|
|
|
|
|
repo = repository or self.ui_manager.ui_data.get('repository') or {}
|
|
|
|
|
return repo.get('mode') in self.REMOTE_READY_REPOSITORY_MODES and repo.get('status') in self.REMOTE_READY_REPOSITORY_STATUSES
|
|
|
|
|
|
|
|
|
|
def _static_files(self) -> dict[str, str]:
|
|
|
|
|
"""Files that do not need prompt-specific generation."""
|
|
|
|
|
return {
|
|
|
|
|
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
def _fallback_generated_files(self) -> dict[str, str]:
|
|
|
|
|
"""Deterministic fallback files when LLM generation is unavailable."""
|
|
|
|
|
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
|
|
|
|
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
|
|
|
|
return {
|
|
|
|
|
"README.md": (
|
|
|
|
|
f"# {self.project_name}\n\n"
|
|
|
|
|
f"{self.description}\n\n"
|
|
|
|
|
"## Features\n"
|
|
|
|
|
f"{feature_section}\n\n"
|
|
|
|
|
"## Tech Stack\n"
|
|
|
|
|
f"{tech_section}\n"
|
|
|
|
|
),
|
|
|
|
|
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
|
|
|
|
"main.py": (
|
|
|
|
|
"from fastapi import FastAPI\n\n"
|
|
|
|
|
"app = FastAPI(title=\"Generated App\")\n\n"
|
|
|
|
|
"@app.get('/')\n"
|
|
|
|
|
"def read_root():\n"
|
|
|
|
|
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
|
|
|
|
),
|
|
|
|
|
"tests/test_app.py": (
|
|
|
|
|
"from main import read_root\n\n"
|
|
|
|
|
"def test_read_root():\n"
|
|
|
|
|
f" assert read_root()['name'] == '{self.project_name}'\n"
|
|
|
|
|
),
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
def _build_pr_branch_name(self, project_id: str) -> str:
|
|
|
|
|
"""Build a stable branch name used until the PR is merged."""
|
|
|
|
|
return f"ai/{project_id}"
|
|
|
|
|
@@ -158,7 +215,7 @@ class AgentOrchestrator:
|
|
|
|
|
"""Persist the current generation plan as an inspectable trace."""
|
|
|
|
|
if not self.db_manager or not self.history or not self.prompt_audit:
|
|
|
|
|
return
|
|
|
|
|
planned_files = list(self._template_files().keys())
|
|
|
|
|
planned_files = list(self._static_files().keys()) + list(self._fallback_generated_files().keys())
|
|
|
|
|
self.db_manager.log_llm_trace(
|
|
|
|
|
project_id=self.project_id,
|
|
|
|
|
history_id=self.history.id,
|
|
|
|
|
@@ -188,6 +245,155 @@ class AgentOrchestrator:
|
|
|
|
|
fallback_used=False,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
def _is_safe_relative_path(self, path: str) -> bool:
|
|
|
|
|
"""Return whether a generated file path is safe to write under the project root."""
|
|
|
|
|
normalized = str(PurePosixPath((path or '').strip()))
|
|
|
|
|
if not normalized or normalized in {'.', '..'}:
|
|
|
|
|
return False
|
|
|
|
|
if normalized.startswith('/') or normalized.startswith('../') or '/../' in normalized:
|
|
|
|
|
return False
|
|
|
|
|
if normalized.startswith('.git/'):
|
|
|
|
|
return False
|
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
|
def _is_supported_generated_text_file(self, path: str) -> bool:
|
|
|
|
|
"""Return whether the generated path is a supported text artifact."""
|
|
|
|
|
normalized = PurePosixPath(path)
|
|
|
|
|
if normalized.name in self.GENERATED_TEXT_FILE_NAMES:
|
|
|
|
|
return True
|
|
|
|
|
return normalized.suffix.lower() in self.GENERATED_TEXT_FILE_SUFFIXES
|
|
|
|
|
|
|
|
|
|
def _collect_workspace_context(self) -> dict:
|
|
|
|
|
"""Collect a compact, text-only snapshot of the current project workspace."""
|
|
|
|
|
if not self.project_root.exists():
|
|
|
|
|
return {'has_existing_files': False, 'files': []}
|
|
|
|
|
|
|
|
|
|
files: list[dict] = []
|
|
|
|
|
total_chars = 0
|
|
|
|
|
for path in sorted(self.project_root.rglob('*')):
|
|
|
|
|
if not path.is_file():
|
|
|
|
|
continue
|
|
|
|
|
relative_path = path.relative_to(self.project_root).as_posix()
|
|
|
|
|
if relative_path == '.gitignore':
|
|
|
|
|
continue
|
|
|
|
|
if not self._is_safe_relative_path(relative_path) or not self._is_supported_generated_text_file(relative_path):
|
|
|
|
|
continue
|
|
|
|
|
try:
|
|
|
|
|
content = path.read_text(encoding='utf-8')
|
|
|
|
|
except (UnicodeDecodeError, OSError):
|
|
|
|
|
continue
|
|
|
|
|
remaining_chars = self.MAX_WORKSPACE_CONTEXT_CHARS - total_chars
|
|
|
|
|
if remaining_chars <= 0:
|
|
|
|
|
break
|
|
|
|
|
snippet = content[:remaining_chars]
|
|
|
|
|
files.append(
|
|
|
|
|
{
|
|
|
|
|
'path': relative_path,
|
|
|
|
|
'content': snippet,
|
|
|
|
|
'truncated': len(snippet) < len(content),
|
|
|
|
|
}
|
|
|
|
|
)
|
|
|
|
|
total_chars += len(snippet)
|
|
|
|
|
if len(files) >= self.MAX_WORKSPACE_CONTEXT_FILES:
|
|
|
|
|
break
|
|
|
|
|
return {'has_existing_files': bool(files), 'files': files}
|
|
|
|
|
|
|
|
|
|
def _parse_generated_files(self, content: str | None) -> dict[str, str]:
|
|
|
|
|
"""Parse an LLM file bundle response into relative-path/content pairs."""
|
|
|
|
|
if not content:
|
|
|
|
|
return {}
|
|
|
|
|
try:
|
|
|
|
|
parsed = json.loads(content)
|
|
|
|
|
except Exception:
|
|
|
|
|
return {}
|
|
|
|
|
generated: dict[str, str] = {}
|
|
|
|
|
if isinstance(parsed, dict) and isinstance(parsed.get('files'), list):
|
|
|
|
|
for item in parsed['files']:
|
|
|
|
|
if not isinstance(item, dict):
|
|
|
|
|
continue
|
|
|
|
|
path = str(item.get('path') or '').strip()
|
|
|
|
|
file_content = item.get('content')
|
|
|
|
|
if (
|
|
|
|
|
self._is_safe_relative_path(path)
|
|
|
|
|
and self._is_supported_generated_text_file(path)
|
|
|
|
|
and isinstance(file_content, str)
|
|
|
|
|
and file_content.strip()
|
|
|
|
|
):
|
|
|
|
|
generated[path] = file_content.rstrip() + "\n"
|
|
|
|
|
elif isinstance(parsed, dict):
|
|
|
|
|
for path, file_content in parsed.items():
|
|
|
|
|
normalized_path = str(path).strip()
|
|
|
|
|
if (
|
|
|
|
|
self._is_safe_relative_path(normalized_path)
|
|
|
|
|
and self._is_supported_generated_text_file(normalized_path)
|
|
|
|
|
and isinstance(file_content, str)
|
|
|
|
|
and file_content.strip()
|
|
|
|
|
):
|
|
|
|
|
generated[normalized_path] = file_content.rstrip() + "\n"
|
|
|
|
|
return generated
|
|
|
|
|
|
|
|
|
|
async def _generate_prompt_driven_files(self) -> tuple[dict[str, str], dict | None, bool]:
    """Use the configured LLM to generate prompt-specific project files.

    Returns:
        A tuple ``(files, trace, editing_existing_workspace)`` where ``files``
        maps repository-relative paths to content, ``trace`` is the LLM call
        trace dict (or None), and ``editing_existing_workspace`` is True when
        the workspace already contained files (in which case no deterministic
        fallback is merged in).
    """
    # Prepare the deterministic fallback up front so a failed or empty LLM
    # response still yields a usable scaffold for fresh projects.
    fallback_files = self._fallback_generated_files()
    workspace_context = self._collect_workspace_context()
    has_existing_files = bool(workspace_context.get('has_existing_files'))
    if has_existing_files:
        # Edit mode: steer the model toward the smallest change set against
        # the existing repository rather than regenerating a starter app.
        system_prompt = (
            'You modify an existing software repository. '
            'Return only JSON. Update the smallest necessary set of files to satisfy the new prompt. '
            'Prefer editing existing files over inventing a new starter app. '
            'Only return files that should be written. Omit unchanged files. '
            'Use repository-relative paths and do not wrap the JSON in markdown fences.'
        )
        user_prompt = (
            f"Project name: {self.project_name}\n"
            f"Description: {self.description}\n"
            f"Original prompt: {self.prompt_text or self.description}\n"
            f"Requested features: {json.dumps(self.features)}\n"
            f"Preferred tech stack: {json.dumps(self.tech_stack)}\n"
            f"Related issue: {json.dumps(self.related_issue) if self.related_issue else 'null'}\n\n"
            f"Current workspace snapshot:\n{json.dumps(workspace_context['files'], indent=2)}\n\n"
            'Return JSON shaped as {"files": [{"path": "relative/path.py", "content": "..."}, ...]}. '
            'Each file path must be relative to the repository root.'
        )
    else:
        # Fresh-project mode: ask for a compact but concrete starter project
        # with a fixed minimum file set.
        system_prompt = (
            'You generate small but concrete starter projects. '
            'Return only JSON. Provide production-like but compact code that directly reflects the user request. '
            'Include the files README.md, requirements.txt, main.py, and tests/test_app.py. '
            'Use FastAPI for Python web requests unless the prompt clearly demands something else. '
            'The test must verify a real behavior from main.py. '
            'Do not wrap the JSON in markdown fences.'
        )
        user_prompt = (
            f"Project name: {self.project_name}\n"
            f"Description: {self.description}\n"
            f"Original prompt: {self.prompt_text or self.description}\n"
            f"Requested features: {json.dumps(self.features)}\n"
            f"Preferred tech stack: {json.dumps(self.tech_stack)}\n"
            f"Related issue: {json.dumps(self.related_issue) if self.related_issue else 'null'}\n\n"
            'Return JSON shaped as {"files": [{"path": "README.md", "content": "..."}, ...]}. '
            'At minimum include README.md, requirements.txt, main.py, and tests/test_app.py.'
        )
    # NOTE(review): the stage label 'generation_plan' is used here even though
    # this call produces file content — confirm the label is intentional.
    content, trace = await LLMServiceClient().chat_with_trace(
        stage='generation_plan',
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        tool_context_input={
            'project_id': self.project_id,
            'project_name': self.project_name,
            'repository': self.ui_manager.ui_data.get('repository'),
            'related_issue': self.related_issue,
            'workspace_files': workspace_context.get('files', []),
        },
        expect_json=True,
    )
    generated_files = self._parse_generated_files(content)
    if has_existing_files:
        # When editing an existing workspace, never merge the fallback
        # scaffold — only the model's explicit edits are returned.
        return generated_files, trace, True
    # Fresh projects: generated files override the fallback where both exist.
    merged_files = {**fallback_files, **generated_files}
    return merged_files, trace, False
|
|
|
|
|
|
|
|
|
|
async def _sync_issue_context(self) -> None:
|
|
|
|
|
"""Sync repository issues and resolve a linked issue from the prompt when present."""
|
|
|
|
|
if not self.db_manager or not self.history:
|
|
|
|
|
@@ -212,6 +418,14 @@ class AgentOrchestrator:
|
|
|
|
|
self.db_manager.attach_issue_to_prompt(self.prompt_audit.id, self.related_issue)
|
|
|
|
|
|
|
|
|
|
async def _ensure_remote_repository(self) -> None:
|
|
|
|
|
repository = self.ui_manager.ui_data.get("repository") or {}
|
|
|
|
|
if self._repository_supports_remote_delivery(repository):
|
|
|
|
|
repository.setdefault("provider", "gitea")
|
|
|
|
|
repository.setdefault("status", "ready")
|
|
|
|
|
if repository.get("url"):
|
|
|
|
|
self.repo_url = repository.get("url")
|
|
|
|
|
self.ui_manager.ui_data["repository"] = repository
|
|
|
|
|
return
|
|
|
|
|
if not settings.use_project_repositories:
|
|
|
|
|
self.ui_manager.ui_data["repository"]["status"] = "shared"
|
|
|
|
|
if settings.gitea_repo:
|
|
|
|
|
@@ -303,9 +517,7 @@ class AgentOrchestrator:
|
|
|
|
|
async def _push_branch(self, branch: str) -> dict | None:
|
|
|
|
|
"""Push a branch to the configured project repository when available."""
|
|
|
|
|
repository = self.ui_manager.ui_data.get('repository') or {}
|
|
|
|
|
if repository.get('mode') != 'project':
|
|
|
|
|
return None
|
|
|
|
|
if repository.get('status') not in {'created', 'exists', 'ready'}:
|
|
|
|
|
if not self._repository_supports_remote_delivery(repository):
|
|
|
|
|
return None
|
|
|
|
|
if not settings.gitea_token or not self.repo_owner or not self.repo_name:
|
|
|
|
|
return None
|
|
|
|
|
@@ -352,7 +564,7 @@ class AgentOrchestrator:
|
|
|
|
|
self.ui_manager.ui_data['pull_request'] = self.active_pull_request
|
|
|
|
|
return self.active_pull_request
|
|
|
|
|
repository = self.ui_manager.ui_data.get('repository') or {}
|
|
|
|
|
if repository.get('mode') != 'project' or repository.get('status') not in {'created', 'exists', 'ready'}:
|
|
|
|
|
if not self._repository_supports_remote_delivery(repository):
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
title = f"AI delivery for {self.project_name}"
|
|
|
|
|
@@ -393,9 +605,7 @@ class AgentOrchestrator:
|
|
|
|
|
async def _push_remote_commit(self, commit_hash: str, commit_message: str, changed_files: list[str], base_commit: str | None) -> dict | None:
|
|
|
|
|
"""Push the local commit to the provisioned Gitea repository and build browser links."""
|
|
|
|
|
repository = self.ui_manager.ui_data.get("repository") or {}
|
|
|
|
|
if repository.get("mode") != "project":
|
|
|
|
|
return None
|
|
|
|
|
if repository.get("status") not in {"created", "exists", "ready"}:
|
|
|
|
|
if not self._repository_supports_remote_delivery(repository):
|
|
|
|
|
return None
|
|
|
|
|
push_result = await self._push_branch(self.branch_name)
|
|
|
|
|
if push_result is None:
|
|
|
|
|
@@ -455,6 +665,8 @@ class AgentOrchestrator:
|
|
|
|
|
target.parent.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
change_type = "UPDATE" if target.exists() else "CREATE"
|
|
|
|
|
previous_content = target.read_text(encoding="utf-8") if target.exists() else ""
|
|
|
|
|
if previous_content == content:
|
|
|
|
|
return
|
|
|
|
|
diff_text = self._build_diff_text(relative_path, previous_content, content)
|
|
|
|
|
target.write_text(content, encoding="utf-8")
|
|
|
|
|
self.changed_files.append(relative_path)
|
|
|
|
|
@@ -468,34 +680,6 @@ class AgentOrchestrator:
|
|
|
|
|
}
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
def _template_files(self) -> dict[str, str]:
|
|
|
|
|
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
|
|
|
|
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
|
|
|
|
return {
|
|
|
|
|
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
|
|
|
|
"README.md": (
|
|
|
|
|
f"# {self.project_name}\n\n"
|
|
|
|
|
f"{self.description}\n\n"
|
|
|
|
|
"## Features\n"
|
|
|
|
|
f"{feature_section}\n\n"
|
|
|
|
|
"## Tech Stack\n"
|
|
|
|
|
f"{tech_section}\n"
|
|
|
|
|
),
|
|
|
|
|
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
|
|
|
|
"main.py": (
|
|
|
|
|
"from fastapi import FastAPI\n\n"
|
|
|
|
|
"app = FastAPI(title=\"Generated App\")\n\n"
|
|
|
|
|
"@app.get('/')\n"
|
|
|
|
|
"def read_root():\n"
|
|
|
|
|
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
|
|
|
|
),
|
|
|
|
|
"tests/test_app.py": (
|
|
|
|
|
"from main import read_root\n\n"
|
|
|
|
|
"def test_read_root():\n"
|
|
|
|
|
f" assert read_root()['name'] == '{self.project_name}'\n"
|
|
|
|
|
),
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async def run(self) -> dict:
|
|
|
|
|
"""Run the software generation process with full audit logging."""
|
|
|
|
|
try:
|
|
|
|
|
@@ -585,18 +769,37 @@ class AgentOrchestrator:
|
|
|
|
|
async def _create_project_structure(self) -> None:
|
|
|
|
|
"""Create initial project structure."""
|
|
|
|
|
self.project_root.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
for relative_path, content in self._template_files().items():
|
|
|
|
|
if relative_path.startswith("main.py") or relative_path.startswith("tests/"):
|
|
|
|
|
continue
|
|
|
|
|
for relative_path, content in self._static_files().items():
|
|
|
|
|
self._write_file(relative_path, content)
|
|
|
|
|
self._append_log(f"Project structure created under {self.project_root}.")
|
|
|
|
|
|
|
|
|
|
async def _generate_code(self) -> None:
|
|
|
|
|
"""Generate code using Ollama."""
|
|
|
|
|
for relative_path, content in self._template_files().items():
|
|
|
|
|
if relative_path in {"main.py", "tests/test_app.py"}:
|
|
|
|
|
change_count_before = len(self.pending_code_changes)
|
|
|
|
|
generated_files, trace, editing_existing_workspace = await self._generate_prompt_driven_files()
|
|
|
|
|
for relative_path, content in generated_files.items():
|
|
|
|
|
self._write_file(relative_path, content)
|
|
|
|
|
self._append_log("Application entrypoint and smoke test generated.")
|
|
|
|
|
if editing_existing_workspace and len(self.pending_code_changes) == change_count_before:
|
|
|
|
|
raise RuntimeError('The LLM response did not produce any file changes for the existing project.')
|
|
|
|
|
fallback_used = bool(trace and trace.get('fallback_used')) or trace is None
|
|
|
|
|
if self.db_manager and self.history and self.prompt_audit and trace:
|
|
|
|
|
self.db_manager.log_llm_trace(
|
|
|
|
|
project_id=self.project_id,
|
|
|
|
|
history_id=self.history.id,
|
|
|
|
|
prompt_id=self.prompt_audit.id,
|
|
|
|
|
stage='code_generation',
|
|
|
|
|
provider=trace.get('provider', 'ollama'),
|
|
|
|
|
model=trace.get('model', settings.OLLAMA_MODEL),
|
|
|
|
|
system_prompt=trace.get('system_prompt', ''),
|
|
|
|
|
user_prompt=trace.get('user_prompt', self.prompt_text or self.description),
|
|
|
|
|
assistant_response=trace.get('assistant_response', ''),
|
|
|
|
|
raw_response=trace.get('raw_response'),
|
|
|
|
|
fallback_used=fallback_used,
|
|
|
|
|
)
|
|
|
|
|
if fallback_used:
|
|
|
|
|
self._append_log('LLM code generation was unavailable; used deterministic scaffolding fallback.')
|
|
|
|
|
else:
|
|
|
|
|
self._append_log('Application files generated from the prompt with the configured LLM.')
|
|
|
|
|
|
|
|
|
|
async def _run_tests(self) -> None:
|
|
|
|
|
"""Run tests for the generated code."""
|
|
|
|
|
|