fix: better code generation, refs NOISSUE
This commit is contained in:
@@ -3,6 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import difflib
|
||||
import json
|
||||
import py_compile
|
||||
import re
|
||||
import subprocess
|
||||
@@ -14,12 +15,14 @@ try:
|
||||
from .database_manager import DatabaseManager
|
||||
from .git_manager import GitManager
|
||||
from .gitea import GiteaAPI
|
||||
from .llm_service import LLMServiceClient
|
||||
from .ui_manager import UIManager
|
||||
except ImportError:
|
||||
from config import settings
|
||||
from agents.database_manager import DatabaseManager
|
||||
from agents.git_manager import GitManager
|
||||
from agents.gitea import GiteaAPI
|
||||
from agents.llm_service import LLMServiceClient
|
||||
from agents.ui_manager import UIManager
|
||||
|
||||
|
||||
@@ -138,6 +141,40 @@ class AgentOrchestrator:
|
||||
if self.active_pull_request:
|
||||
self.ui_manager.ui_data["pull_request"] = self.active_pull_request
|
||||
|
||||
def _static_files(self) -> dict[str, str]:
|
||||
"""Files that do not need prompt-specific generation."""
|
||||
return {
|
||||
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
||||
}
|
||||
|
||||
def _fallback_generated_files(self) -> dict[str, str]:
|
||||
"""Deterministic fallback files when LLM generation is unavailable."""
|
||||
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
||||
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
||||
return {
|
||||
"README.md": (
|
||||
f"# {self.project_name}\n\n"
|
||||
f"{self.description}\n\n"
|
||||
"## Features\n"
|
||||
f"{feature_section}\n\n"
|
||||
"## Tech Stack\n"
|
||||
f"{tech_section}\n"
|
||||
),
|
||||
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
||||
"main.py": (
|
||||
"from fastapi import FastAPI\n\n"
|
||||
"app = FastAPI(title=\"Generated App\")\n\n"
|
||||
"@app.get('/')\n"
|
||||
"def read_root():\n"
|
||||
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
||||
),
|
||||
"tests/test_app.py": (
|
||||
"from main import read_root\n\n"
|
||||
"def test_read_root():\n"
|
||||
f" assert read_root()['name'] == '{self.project_name}'\n"
|
||||
),
|
||||
}
|
||||
|
||||
def _build_pr_branch_name(self, project_id: str) -> str:
|
||||
"""Build a stable branch name used until the PR is merged."""
|
||||
return f"ai/{project_id}"
|
||||
@@ -158,7 +195,7 @@ class AgentOrchestrator:
|
||||
"""Persist the current generation plan as an inspectable trace."""
|
||||
if not self.db_manager or not self.history or not self.prompt_audit:
|
||||
return
|
||||
planned_files = list(self._template_files().keys())
|
||||
planned_files = list(self._static_files().keys()) + list(self._fallback_generated_files().keys())
|
||||
self.db_manager.log_llm_trace(
|
||||
project_id=self.project_id,
|
||||
history_id=self.history.id,
|
||||
@@ -188,6 +225,66 @@ class AgentOrchestrator:
|
||||
fallback_used=False,
|
||||
)
|
||||
|
||||
def _parse_generated_files(self, content: str | None) -> dict[str, str]:
|
||||
"""Parse an LLM file bundle response into relative-path/content pairs."""
|
||||
if not content:
|
||||
return {}
|
||||
try:
|
||||
parsed = json.loads(content)
|
||||
except Exception:
|
||||
return {}
|
||||
allowed_paths = set(self._fallback_generated_files().keys())
|
||||
generated: dict[str, str] = {}
|
||||
if isinstance(parsed, dict) and isinstance(parsed.get('files'), list):
|
||||
for item in parsed['files']:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
path = str(item.get('path') or '').strip()
|
||||
file_content = item.get('content')
|
||||
if path in allowed_paths and isinstance(file_content, str) and file_content.strip():
|
||||
generated[path] = file_content.rstrip() + "\n"
|
||||
elif isinstance(parsed, dict):
|
||||
for path, file_content in parsed.items():
|
||||
if path in allowed_paths and isinstance(file_content, str) and file_content.strip():
|
||||
generated[str(path)] = file_content.rstrip() + "\n"
|
||||
return generated
|
||||
|
||||
async def _generate_prompt_driven_files(self) -> tuple[dict[str, str], dict | None]:
    """Use the configured LLM to generate prompt-specific project files.

    Returns the generated files merged over the deterministic fallback set
    (LLM output wins per path), plus the raw LLM trace (or None).
    """
    fallback_files = self._fallback_generated_files()
    system_prompt = (
        'You generate small but concrete starter projects. '
        'Return only JSON. Provide production-like but compact code that directly reflects the user request. '
        'Include the files README.md, requirements.txt, main.py, and tests/test_app.py. '
        'Use FastAPI for Python web requests unless the prompt clearly demands something else. '
        'The test must verify a real behavior from main.py. '
        'Do not wrap the JSON in markdown fences.'
    )
    # Assemble the user prompt line by line, then join once.
    prompt_lines = [
        f"Project name: {self.project_name}",
        f"Description: {self.description}",
        f"Original prompt: {self.prompt_text or self.description}",
        f"Requested features: {json.dumps(self.features)}",
        f"Preferred tech stack: {json.dumps(self.tech_stack)}",
        f"Related issue: {json.dumps(self.related_issue) if self.related_issue else 'null'}",
        "",
        'Return JSON shaped as {"files": [{"path": "README.md", "content": "..."}, ...]}.',
    ]
    user_prompt = "\n".join(prompt_lines)
    tool_context = {
        'project_id': self.project_id,
        'project_name': self.project_name,
        'repository': self.ui_manager.ui_data.get('repository'),
        'related_issue': self.related_issue,
    }
    content, trace = await LLMServiceClient().chat_with_trace(
        stage='generation_plan',
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        tool_context_input=tool_context,
        expect_json=True,
    )
    llm_files = self._parse_generated_files(content)
    return {**fallback_files, **llm_files}, trace
|
||||
|
||||
async def _sync_issue_context(self) -> None:
|
||||
"""Sync repository issues and resolve a linked issue from the prompt when present."""
|
||||
if not self.db_manager or not self.history:
|
||||
@@ -468,34 +565,6 @@ class AgentOrchestrator:
|
||||
}
|
||||
)
|
||||
|
||||
def _template_files(self) -> dict[str, str]:
|
||||
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
||||
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
||||
return {
|
||||
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
||||
"README.md": (
|
||||
f"# {self.project_name}\n\n"
|
||||
f"{self.description}\n\n"
|
||||
"## Features\n"
|
||||
f"{feature_section}\n\n"
|
||||
"## Tech Stack\n"
|
||||
f"{tech_section}\n"
|
||||
),
|
||||
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
||||
"main.py": (
|
||||
"from fastapi import FastAPI\n\n"
|
||||
"app = FastAPI(title=\"Generated App\")\n\n"
|
||||
"@app.get('/')\n"
|
||||
"def read_root():\n"
|
||||
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
||||
),
|
||||
"tests/test_app.py": (
|
||||
"from main import read_root\n\n"
|
||||
"def test_read_root():\n"
|
||||
f" assert read_root()['name'] == '{self.project_name}'\n"
|
||||
),
|
||||
}
|
||||
|
||||
async def run(self) -> dict:
|
||||
"""Run the software generation process with full audit logging."""
|
||||
try:
|
||||
@@ -585,18 +654,34 @@ class AgentOrchestrator:
|
||||
async def _create_project_structure(self) -> None:
|
||||
"""Create initial project structure."""
|
||||
self.project_root.mkdir(parents=True, exist_ok=True)
|
||||
for relative_path, content in self._template_files().items():
|
||||
if relative_path.startswith("main.py") or relative_path.startswith("tests/"):
|
||||
continue
|
||||
for relative_path, content in self._static_files().items():
|
||||
self._write_file(relative_path, content)
|
||||
self._append_log(f"Project structure created under {self.project_root}.")
|
||||
|
||||
async def _generate_code(self) -> None:
|
||||
"""Generate code using Ollama."""
|
||||
for relative_path, content in self._template_files().items():
|
||||
if relative_path in {"main.py", "tests/test_app.py"}:
|
||||
generated_files, trace = await self._generate_prompt_driven_files()
|
||||
for relative_path, content in generated_files.items():
|
||||
self._write_file(relative_path, content)
|
||||
self._append_log("Application entrypoint and smoke test generated.")
|
||||
fallback_used = bool(trace and trace.get('fallback_used')) or trace is None
|
||||
if self.db_manager and self.history and self.prompt_audit and trace:
|
||||
self.db_manager.log_llm_trace(
|
||||
project_id=self.project_id,
|
||||
history_id=self.history.id,
|
||||
prompt_id=self.prompt_audit.id,
|
||||
stage='code_generation',
|
||||
provider=trace.get('provider', 'ollama'),
|
||||
model=trace.get('model', settings.OLLAMA_MODEL),
|
||||
system_prompt=trace.get('system_prompt', ''),
|
||||
user_prompt=trace.get('user_prompt', self.prompt_text or self.description),
|
||||
assistant_response=trace.get('assistant_response', ''),
|
||||
raw_response=trace.get('raw_response'),
|
||||
fallback_used=fallback_used,
|
||||
)
|
||||
if fallback_used:
|
||||
self._append_log('LLM code generation was unavailable; used deterministic scaffolding fallback.')
|
||||
else:
|
||||
self._append_log('Application files generated from the prompt with the configured LLM.')
|
||||
|
||||
async def _run_tests(self) -> None:
|
||||
"""Run tests for the generated code."""
|
||||
|
||||
@@ -404,8 +404,18 @@ async def _run_generation(
|
||||
fallback_used=summary_trace.get('fallback_used', False),
|
||||
)
|
||||
response_data['summary_message'] = summary_message
|
||||
response_data['summary_metadata'] = {
|
||||
'provider': summary_trace.get('provider'),
|
||||
'model': summary_trace.get('model'),
|
||||
'fallback_used': bool(summary_trace.get('fallback_used')),
|
||||
}
|
||||
response_data['pull_request'] = result.get('pull_request') or manager.get_open_pull_request(project_id=project_id)
|
||||
return {'status': result['status'], 'data': response_data, 'summary_message': summary_message}
|
||||
return {
|
||||
'status': result['status'],
|
||||
'data': response_data,
|
||||
'summary_message': summary_message,
|
||||
'summary_metadata': response_data['summary_metadata'],
|
||||
}
|
||||
|
||||
|
||||
def _project_root(project_id: str) -> Path:
|
||||
|
||||
Reference in New Issue
Block a user