Compare commits
22 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c147d8be78 | |||
| 9ffaa18efe | |||
| d53f3fe207 | |||
| 4f1d757dd8 | |||
| ac75cc2e3a | |||
| f7f00d4e14 | |||
| 1c539d5f60 | |||
| 64fcd2967c | |||
| 4d050ff527 | |||
| 1944e2a9cf | |||
| 7e4066c609 | |||
| 4eeec5d808 | |||
| cbbed83915 | |||
| 1e72bc9a28 | |||
| b0c95323fd | |||
| d60e753acf | |||
| 94c38359c7 | |||
| 2943fc79ab | |||
| 3e40338bbf | |||
| 39f9651236 | |||
| 3175c53504 | |||
| 29cf2aa6bd |
@@ -12,7 +12,10 @@ WORKDIR /app
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
&& update-ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install dependencies
|
||||
|
||||
119
HISTORY.md
119
HISTORY.md
@@ -4,6 +4,125 @@ Changelog
|
||||
|
||||
(unreleased)
|
||||
------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Project association improvements, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
|
||||
0.9.10 (2026-04-11)
|
||||
-------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More git integration fixes, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.9 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add missing git binary, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.8 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More file change fixes, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.7 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More file generation improvements, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.6 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Repo onboarding fix, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.5 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better code generation, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.4 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add commit retry, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.3 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better home assistant integration, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.2 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- UI improvements and prompt hardening, refs NOISSUE. [Simon
|
||||
Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.1 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better repo name generation, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.0 (2026-04-11)
|
||||
------------------
|
||||
- Feat: editable guardrails, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
|
||||
|
||||
21
README.md
21
README.md
@@ -71,18 +71,11 @@ N8N_WEBHOOK_URL=http://n8n.yourserver.com/webhook/telegram
|
||||
TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Optional: queue Telegram prompts until Home Assistant reports battery/surplus targets are met.
|
||||
PROMPT_QUEUE_ENABLED=false
|
||||
PROMPT_QUEUE_AUTO_PROCESS=true
|
||||
PROMPT_QUEUE_FORCE_PROCESS=false
|
||||
PROMPT_QUEUE_POLL_INTERVAL_SECONDS=60
|
||||
PROMPT_QUEUE_MAX_BATCH_SIZE=1
|
||||
# Optional: Home Assistant integration.
|
||||
# Only the base URL and token are required in the environment.
|
||||
# Entity ids, thresholds, and queue behavior can be configured from the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
HOME_ASSISTANT_BATTERY_ENTITY_ID=sensor.home_battery_soc
|
||||
HOME_ASSISTANT_SURPLUS_ENTITY_ID=sensor.home_pv_surplus_power
|
||||
HOME_ASSISTANT_BATTERY_FULL_THRESHOLD=95
|
||||
HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS=100
|
||||
```
|
||||
|
||||
### Build and Run
|
||||
@@ -107,7 +100,7 @@ docker-compose up -d
|
||||
|
||||
The backend now interprets free-form Telegram text with Ollama before generation.
|
||||
If `TELEGRAM_CHAT_ID` is set, the Telegram-trigger workflow only reacts to messages from that specific chat.
|
||||
If `PROMPT_QUEUE_ENABLED=true`, Telegram prompts are stored in a durable queue and processed only when the Home Assistant battery and surplus thresholds are satisfied, unless you force processing via `/queue/process` or send `process_now=true`.
|
||||
If queueing is enabled from the dashboard System tab, Telegram prompts are stored in a durable queue and processed only when the configured Home Assistant battery and surplus thresholds are satisfied, unless you force processing via `/queue/process` or send `process_now=true`.
|
||||
|
||||
2. **Monitor progress via Web UI:**
|
||||
|
||||
@@ -121,7 +114,11 @@ If you deploy the container with PostgreSQL environment variables set, the servi
|
||||
|
||||
The health tab now shows separate application, n8n, Gitea, and Home Assistant/queue diagnostics so misconfigured integrations are visible without checking container logs.
|
||||
|
||||
The dashboard Health tab also exposes operator controls for the prompt queue, including manual batch processing, forced processing, and retrying failed items.
|
||||
The dashboard Health tab exposes operator controls for the prompt queue, including manual batch processing, forced processing, and retrying failed items.
|
||||
|
||||
The dashboard System tab now also stores Home Assistant entity ids, queue toggles, thresholds, and batch settings in the database, so the environment only needs `HOME_ASSISTANT_URL` and `HOME_ASSISTANT_TOKEN` for that integration.
|
||||
|
||||
Projects that show `uncommitted`, `local_only`, or `pushed_no_pr` delivery warnings in the dashboard can now be retried in place from the UI before resorting to purging orphan audit rows.
|
||||
|
||||
Guardrail and system prompts are no longer environment-only in practice: the factory can persist DB-backed overrides for the editable LLM prompt set, expose them at `/llm/prompts`, and edit them from the dashboard System tab. Environment values still act as defaults and as the reset target.
|
||||
|
||||
|
||||
@@ -43,18 +43,10 @@ TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Home Assistant energy gate for queued Telegram prompts
|
||||
# Leave PROMPT_QUEUE_ENABLED=false to preserve immediate Telegram processing.
|
||||
PROMPT_QUEUE_ENABLED=false
|
||||
PROMPT_QUEUE_AUTO_PROCESS=true
|
||||
PROMPT_QUEUE_FORCE_PROCESS=false
|
||||
PROMPT_QUEUE_POLL_INTERVAL_SECONDS=60
|
||||
PROMPT_QUEUE_MAX_BATCH_SIZE=1
|
||||
# Only the base URL and token are environment-backed.
|
||||
# Queue toggles, entity ids, thresholds, and batch sizing can be edited in the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
HOME_ASSISTANT_BATTERY_ENTITY_ID=sensor.home_battery_soc
|
||||
HOME_ASSISTANT_SURPLUS_ENTITY_ID=sensor.home_pv_surplus_power
|
||||
HOME_ASSISTANT_BATTERY_FULL_THRESHOLD=95
|
||||
HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS=100
|
||||
|
||||
# PostgreSQL
|
||||
# In production, provide PostgreSQL settings below. They now take precedence over the SQLite default.
|
||||
|
||||
@@ -75,18 +75,11 @@ N8N_WEBHOOK_URL=http://n8n.yourserver.com/webhook/telegram
|
||||
TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Optional: queue Telegram prompts until Home Assistant reports energy surplus.
|
||||
PROMPT_QUEUE_ENABLED=false
|
||||
PROMPT_QUEUE_AUTO_PROCESS=true
|
||||
PROMPT_QUEUE_FORCE_PROCESS=false
|
||||
PROMPT_QUEUE_POLL_INTERVAL_SECONDS=60
|
||||
PROMPT_QUEUE_MAX_BATCH_SIZE=1
|
||||
# Optional: Home Assistant integration.
|
||||
# Only the base URL and token are required in the environment.
|
||||
# Entity ids, thresholds, and queue behavior can be configured from the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
HOME_ASSISTANT_BATTERY_ENTITY_ID=sensor.home_battery_soc
|
||||
HOME_ASSISTANT_SURPLUS_ENTITY_ID=sensor.home_pv_surplus_power
|
||||
HOME_ASSISTANT_BATTERY_FULL_THRESHOLD=95
|
||||
HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS=100
|
||||
```
|
||||
|
||||
### Build and Run
|
||||
@@ -109,7 +102,9 @@ docker-compose up -d
|
||||
Features: user authentication, task CRUD, notifications
|
||||
```
|
||||
|
||||
If `PROMPT_QUEUE_ENABLED=true`, Telegram prompts are queued durably and processed only when Home Assistant reports the configured battery and surplus thresholds. Operators can override the gate via `/queue/process` or by sending `process_now=true` to `/generate/text`.
|
||||
If queueing is enabled from the dashboard System tab, Telegram prompts are queued durably and processed only when Home Assistant reports the configured battery and surplus thresholds. Operators can override the gate via `/queue/process` or by sending `process_now=true` to `/generate/text`.
|
||||
|
||||
The dashboard System tab stores Home Assistant entity ids, queue toggles, thresholds, and batch settings in the database, so the environment only needs `HOME_ASSISTANT_URL` and `HOME_ASSISTANT_TOKEN` for that integration.
|
||||
|
||||
2. **Monitor progress via Web UI:**
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.9.0
|
||||
0.9.11
|
||||
|
||||
@@ -4,7 +4,7 @@ from sqlalchemy.orm import Session
|
||||
from sqlalchemy import text
|
||||
|
||||
try:
|
||||
from ..config import EDITABLE_LLM_PROMPTS, settings
|
||||
from ..config import EDITABLE_LLM_PROMPTS, EDITABLE_RUNTIME_SETTINGS, settings
|
||||
from ..models import (
|
||||
AuditTrail,
|
||||
ProjectHistory,
|
||||
@@ -18,7 +18,7 @@ try:
|
||||
UserAction,
|
||||
)
|
||||
except ImportError:
|
||||
from config import EDITABLE_LLM_PROMPTS, settings
|
||||
from config import EDITABLE_LLM_PROMPTS, EDITABLE_RUNTIME_SETTINGS, settings
|
||||
from models import (
|
||||
AuditTrail,
|
||||
ProjectHistory,
|
||||
@@ -35,6 +35,7 @@ from datetime import datetime
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class DatabaseMigrations:
|
||||
@@ -87,6 +88,8 @@ class DatabaseManager:
|
||||
PROMPT_QUEUE_ACTION = 'PROMPT_QUEUED'
|
||||
PROMPT_CONFIG_PROJECT_ID = '__llm_prompt_config__'
|
||||
PROMPT_CONFIG_ACTION = 'LLM_PROMPT_CONFIG'
|
||||
RUNTIME_SETTINGS_PROJECT_ID = '__runtime_settings__'
|
||||
RUNTIME_SETTINGS_ACTION = 'RUNTIME_SETTING'
|
||||
|
||||
def __init__(self, db: Session):
|
||||
"""Initialize database manager."""
|
||||
@@ -122,6 +125,56 @@ class DatabaseManager:
|
||||
sanitized = sanitized.replace('--', '-')
|
||||
return sanitized.strip('-') or 'external-project'
|
||||
|
||||
@staticmethod
|
||||
def _partition_code_changes(raw_code_changes: list[dict], commits: list[dict]) -> tuple[list[dict], list[dict], list[dict]]:
|
||||
"""Split code changes into remotely delivered, local-only, and orphaned rows."""
|
||||
published_hashes = {
|
||||
commit.get('commit_hash')
|
||||
for commit in commits
|
||||
if commit.get('commit_hash') and (
|
||||
commit.get('remote_status') == 'pushed'
|
||||
or commit.get('imported_from_remote')
|
||||
or commit.get('commit_url')
|
||||
)
|
||||
}
|
||||
published_prompt_ids = {
|
||||
commit.get('prompt_id')
|
||||
for commit in commits
|
||||
if commit.get('prompt_id') is not None and (
|
||||
commit.get('remote_status') == 'pushed'
|
||||
or commit.get('imported_from_remote')
|
||||
or commit.get('commit_url')
|
||||
)
|
||||
}
|
||||
local_commit_hashes = {commit.get('commit_hash') for commit in commits if commit.get('commit_hash')}
|
||||
local_prompt_ids = {commit.get('prompt_id') for commit in commits if commit.get('prompt_id') is not None}
|
||||
visible_changes: list[dict] = []
|
||||
local_only_changes: list[dict] = []
|
||||
orphaned_changes: list[dict] = []
|
||||
for change in raw_code_changes:
|
||||
change_commit_hash = change.get('commit_hash')
|
||||
prompt_id = change.get('prompt_id')
|
||||
if (change_commit_hash and change_commit_hash in published_hashes) or (prompt_id is not None and prompt_id in published_prompt_ids):
|
||||
visible_changes.append(change)
|
||||
elif (change_commit_hash and change_commit_hash in local_commit_hashes) or (prompt_id is not None and prompt_id in local_prompt_ids):
|
||||
local_only_changes.append(change)
|
||||
else:
|
||||
orphaned_changes.append(change)
|
||||
return visible_changes, local_only_changes, orphaned_changes
|
||||
|
||||
@staticmethod
|
||||
def _dedupe_preserve_order(values: list[str | None]) -> list[str]:
|
||||
"""Return non-empty values in stable unique order."""
|
||||
result: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for value in values:
|
||||
normalized = (value or '').strip()
|
||||
if not normalized or normalized in seen:
|
||||
continue
|
||||
seen.add(normalized)
|
||||
result.append(normalized)
|
||||
return result
|
||||
|
||||
def get_project_by_repository(self, owner: str, repo_name: str, include_archived: bool = False) -> ProjectHistory | None:
|
||||
"""Return the project currently associated with a repository."""
|
||||
normalized_owner = (owner or '').strip().lower()
|
||||
@@ -464,6 +517,26 @@ class DatabaseManager:
|
||||
entries[key] = audit
|
||||
return entries
|
||||
|
||||
def _latest_runtime_setting_entries(self) -> dict[str, AuditTrail]:
|
||||
"""Return the most recent persisted audit row for each editable runtime setting key."""
|
||||
entries: dict[str, AuditTrail] = {}
|
||||
try:
|
||||
audits = (
|
||||
self.db.query(AuditTrail)
|
||||
.filter(AuditTrail.action == self.RUNTIME_SETTINGS_ACTION)
|
||||
.order_by(AuditTrail.created_at.desc(), AuditTrail.id.desc())
|
||||
.all()
|
||||
)
|
||||
except Exception:
|
||||
return entries
|
||||
for audit in audits:
|
||||
metadata = self._normalize_metadata(audit.metadata_json)
|
||||
key = str(metadata.get('key') or '').strip()
|
||||
if not key or key in entries or key not in EDITABLE_RUNTIME_SETTINGS:
|
||||
continue
|
||||
entries[key] = audit
|
||||
return entries
|
||||
|
||||
def get_llm_prompt_override(self, key: str) -> str | None:
|
||||
"""Return the persisted override for one editable LLM prompt key."""
|
||||
entry = self._latest_llm_prompt_config_entries().get(key)
|
||||
@@ -477,6 +550,16 @@ class DatabaseManager:
|
||||
return None
|
||||
return str(value)
|
||||
|
||||
def get_runtime_setting_override(self, key: str):
|
||||
"""Return the persisted override for one editable runtime setting key."""
|
||||
entry = self._latest_runtime_setting_entries().get(key)
|
||||
if entry is None:
|
||||
return None
|
||||
metadata = self._normalize_metadata(entry.metadata_json)
|
||||
if metadata.get('reset_to_default'):
|
||||
return None
|
||||
return metadata.get('value')
|
||||
|
||||
def get_llm_prompt_settings(self) -> list[dict]:
|
||||
"""Return editable LLM prompt definitions merged with persisted DB overrides."""
|
||||
latest = self._latest_llm_prompt_config_entries()
|
||||
@@ -502,6 +585,32 @@ class DatabaseManager:
|
||||
)
|
||||
return items
|
||||
|
||||
def get_runtime_settings(self) -> list[dict]:
|
||||
"""Return editable runtime settings merged with persisted DB overrides."""
|
||||
latest = self._latest_runtime_setting_entries()
|
||||
items = []
|
||||
for key, metadata in EDITABLE_RUNTIME_SETTINGS.items():
|
||||
entry = latest.get(key)
|
||||
entry_metadata = self._normalize_metadata(entry.metadata_json) if entry is not None else {}
|
||||
default_value = getattr(settings, key)
|
||||
persisted_value = None if entry_metadata.get('reset_to_default') else entry_metadata.get('value')
|
||||
items.append(
|
||||
{
|
||||
'key': key,
|
||||
'label': metadata['label'],
|
||||
'category': metadata['category'],
|
||||
'description': metadata['description'],
|
||||
'value_type': metadata['value_type'],
|
||||
'default_value': default_value,
|
||||
'value': persisted_value if persisted_value is not None else default_value,
|
||||
'source': 'database' if persisted_value is not None else 'environment',
|
||||
'updated_at': entry.created_at.isoformat() if entry and entry.created_at else None,
|
||||
'updated_by': entry.actor if entry is not None else None,
|
||||
'reset_to_default': bool(entry_metadata.get('reset_to_default')) if entry is not None else False,
|
||||
}
|
||||
)
|
||||
return items
|
||||
|
||||
def save_llm_prompt_setting(self, key: str, value: str, actor: str = 'dashboard') -> dict:
|
||||
"""Persist one editable LLM prompt override into the audit trail."""
|
||||
if key not in EDITABLE_LLM_PROMPTS:
|
||||
@@ -524,6 +633,28 @@ class DatabaseManager:
|
||||
self.db.refresh(audit)
|
||||
return {'status': 'success', 'setting': next(item for item in self.get_llm_prompt_settings() if item['key'] == key)}
|
||||
|
||||
def save_runtime_setting(self, key: str, value, actor: str = 'dashboard') -> dict:
|
||||
"""Persist one editable runtime setting override into the audit trail."""
|
||||
if key not in EDITABLE_RUNTIME_SETTINGS:
|
||||
return {'status': 'error', 'message': f'Unsupported runtime setting key: {key}'}
|
||||
audit = AuditTrail(
|
||||
project_id=self.RUNTIME_SETTINGS_PROJECT_ID,
|
||||
action=self.RUNTIME_SETTINGS_ACTION,
|
||||
actor=actor,
|
||||
action_type='UPDATE',
|
||||
details=f'Updated runtime setting {key}',
|
||||
message=f'Updated runtime setting {key}',
|
||||
metadata_json={
|
||||
'key': key,
|
||||
'value': value,
|
||||
'reset_to_default': False,
|
||||
},
|
||||
)
|
||||
self.db.add(audit)
|
||||
self.db.commit()
|
||||
self.db.refresh(audit)
|
||||
return {'status': 'success', 'setting': next(item for item in self.get_runtime_settings() if item['key'] == key)}
|
||||
|
||||
def reset_llm_prompt_setting(self, key: str, actor: str = 'dashboard') -> dict:
|
||||
"""Reset one editable LLM prompt override back to its environment/default value."""
|
||||
if key not in EDITABLE_LLM_PROMPTS:
|
||||
@@ -546,6 +677,28 @@ class DatabaseManager:
|
||||
self.db.refresh(audit)
|
||||
return {'status': 'success', 'setting': next(item for item in self.get_llm_prompt_settings() if item['key'] == key)}
|
||||
|
||||
def reset_runtime_setting(self, key: str, actor: str = 'dashboard') -> dict:
|
||||
"""Reset one editable runtime setting override back to its environment/default value."""
|
||||
if key not in EDITABLE_RUNTIME_SETTINGS:
|
||||
return {'status': 'error', 'message': f'Unsupported runtime setting key: {key}'}
|
||||
audit = AuditTrail(
|
||||
project_id=self.RUNTIME_SETTINGS_PROJECT_ID,
|
||||
action=self.RUNTIME_SETTINGS_ACTION,
|
||||
actor=actor,
|
||||
action_type='RESET',
|
||||
details=f'Reset runtime setting {key} to default',
|
||||
message=f'Reset runtime setting {key} to default',
|
||||
metadata_json={
|
||||
'key': key,
|
||||
'value': None,
|
||||
'reset_to_default': True,
|
||||
},
|
||||
)
|
||||
self.db.add(audit)
|
||||
self.db.commit()
|
||||
self.db.refresh(audit)
|
||||
return {'status': 'success', 'setting': next(item for item in self.get_runtime_settings() if item['key'] == key)}
|
||||
|
||||
def attach_issue_to_prompt(self, prompt_id: int, related_issue: dict) -> AuditTrail | None:
|
||||
"""Attach resolved issue context to a previously recorded prompt."""
|
||||
prompt = self.db.query(AuditTrail).filter(AuditTrail.id == prompt_id, AuditTrail.action == 'PROMPT_RECEIVED').first()
|
||||
@@ -1423,7 +1576,9 @@ class DatabaseManager:
|
||||
def log_code_change(self, project_id: str, change_type: str, file_path: str,
|
||||
actor: str, actor_type: str, details: str,
|
||||
history_id: int | None = None, prompt_id: int | None = None,
|
||||
diff_summary: str | None = None, diff_text: str | None = None) -> AuditTrail:
|
||||
diff_summary: str | None = None, diff_text: str | None = None,
|
||||
commit_hash: str | None = None, remote_status: str | None = None,
|
||||
branch: str | None = None) -> AuditTrail:
|
||||
"""Log a code change."""
|
||||
audit = AuditTrail(
|
||||
project_id=project_id,
|
||||
@@ -1442,6 +1597,9 @@ class DatabaseManager:
|
||||
"details": details,
|
||||
"diff_summary": diff_summary,
|
||||
"diff_text": diff_text,
|
||||
"commit_hash": commit_hash,
|
||||
"remote_status": remote_status,
|
||||
"branch": branch,
|
||||
}
|
||||
)
|
||||
self.db.add(audit)
|
||||
@@ -2114,6 +2272,7 @@ class DatabaseManager:
|
||||
"timeline": [],
|
||||
"issues": [],
|
||||
"issue_work": [],
|
||||
"ui_data": {},
|
||||
}
|
||||
|
||||
# Get logs
|
||||
@@ -2132,16 +2291,44 @@ class DatabaseManager:
|
||||
).order_by(AuditTrail.created_at.desc()).all()
|
||||
|
||||
prompts = self.get_prompt_events(project_id=project_id)
|
||||
code_changes = self.get_code_changes(project_id=project_id)
|
||||
raw_code_changes = self.get_code_changes(project_id=project_id)
|
||||
commits = self.get_commits(project_id=project_id)
|
||||
pull_requests = self.get_pull_requests(project_id=project_id)
|
||||
llm_traces = self.get_llm_traces(project_id=project_id)
|
||||
correlations = self.get_prompt_change_correlations(project_id=project_id)
|
||||
code_changes, local_only_code_changes, orphan_code_changes = self._partition_code_changes(raw_code_changes, commits)
|
||||
ui_data = self._get_latest_ui_snapshot_data(history.id)
|
||||
repository = self._get_project_repository(history)
|
||||
timeline = self.get_project_timeline(project_id=project_id)
|
||||
repository_sync = self.get_repository_sync_status(project_id=project_id)
|
||||
issues = self.get_repository_issues(project_id=project_id)
|
||||
issue_work = self.get_issue_work_events(project_id=project_id)
|
||||
published_commits = [
|
||||
commit for commit in commits
|
||||
if commit.get('remote_status') == 'pushed' or commit.get('imported_from_remote') or commit.get('commit_url')
|
||||
]
|
||||
has_pull_request = any(pr.get('pr_state') == 'open' and not pr.get('merged') for pr in pull_requests)
|
||||
if orphan_code_changes:
|
||||
delivery_status = 'uncommitted'
|
||||
delivery_message = (
|
||||
f"{len(orphan_code_changes)} generated file change(s) were recorded without a matching git commit. "
|
||||
"These changes never reached a PR-backed delivery."
|
||||
)
|
||||
elif local_only_code_changes:
|
||||
delivery_status = 'local_only'
|
||||
delivery_message = (
|
||||
f"{len(local_only_code_changes)} generated file change(s) were committed only in the local workspace. "
|
||||
"No remote repo push was recorded for this prompt yet."
|
||||
)
|
||||
elif published_commits and repository and repository.get('mode') == 'project' and not has_pull_request:
|
||||
delivery_status = 'pushed_no_pr'
|
||||
delivery_message = 'Changes were pushed to the remote repository, but no pull request is currently tracked for review.'
|
||||
elif published_commits:
|
||||
delivery_status = 'delivered'
|
||||
delivery_message = 'Generated changes were published to the tracked repository and are reviewable through the recorded pull request.'
|
||||
else:
|
||||
delivery_status = 'pending'
|
||||
delivery_message = 'No git commit has been recorded for this project yet.'
|
||||
|
||||
return {
|
||||
"project": {
|
||||
@@ -2157,6 +2344,10 @@ class DatabaseManager:
|
||||
"repository": repository,
|
||||
"repository_sync": repository_sync,
|
||||
"open_pull_requests": len([pr for pr in pull_requests if pr["pr_state"] == "open" and not pr["merged"]]),
|
||||
"delivery_status": delivery_status,
|
||||
"delivery_message": delivery_message,
|
||||
"local_only_code_change_count": len(local_only_code_changes),
|
||||
"orphan_code_change_count": len(orphan_code_changes),
|
||||
"completed_at": history.completed_at.isoformat() if history.completed_at else None,
|
||||
"created_at": history.started_at.isoformat() if history.started_at else None
|
||||
},
|
||||
@@ -2195,6 +2386,8 @@ class DatabaseManager:
|
||||
],
|
||||
"prompts": prompts,
|
||||
"code_changes": code_changes,
|
||||
"local_only_code_changes": local_only_code_changes,
|
||||
"orphan_code_changes": orphan_code_changes,
|
||||
"commits": commits,
|
||||
"pull_requests": pull_requests,
|
||||
"llm_traces": llm_traces,
|
||||
@@ -2204,6 +2397,7 @@ class DatabaseManager:
|
||||
"repository_sync": repository_sync,
|
||||
"issues": issues,
|
||||
"issue_work": issue_work,
|
||||
"ui_data": ui_data,
|
||||
}
|
||||
|
||||
def get_prompt_events(self, project_id: str | None = None, limit: int = 100) -> list[dict]:
|
||||
@@ -2249,6 +2443,9 @@ class DatabaseManager:
|
||||
"history_id": self._normalize_metadata(change.metadata_json).get("history_id"),
|
||||
"diff_summary": self._normalize_metadata(change.metadata_json).get("diff_summary"),
|
||||
"diff_text": self._normalize_metadata(change.metadata_json).get("diff_text"),
|
||||
"commit_hash": self._normalize_metadata(change.metadata_json).get("commit_hash"),
|
||||
"remote_status": self._normalize_metadata(change.metadata_json).get("remote_status"),
|
||||
"branch": self._normalize_metadata(change.metadata_json).get("branch"),
|
||||
"timestamp": change.created_at.isoformat() if change.created_at else None,
|
||||
}
|
||||
for change in changes
|
||||
@@ -2258,8 +2455,21 @@ class DatabaseManager:
|
||||
"""Correlate prompts with the concrete code changes that followed them."""
|
||||
correlations = self._build_correlations_from_links(project_id=project_id, limit=limit)
|
||||
if correlations:
|
||||
return correlations
|
||||
return self._build_correlations_from_audit_fallback(project_id=project_id, limit=limit)
|
||||
return [
|
||||
correlation for correlation in correlations
|
||||
if any(
|
||||
commit.get('remote_status') == 'pushed' or commit.get('imported_from_remote') or commit.get('commit_url')
|
||||
for commit in correlation.get('commits', [])
|
||||
)
|
||||
]
|
||||
fallback = self._build_correlations_from_audit_fallback(project_id=project_id, limit=limit)
|
||||
return [
|
||||
correlation for correlation in fallback
|
||||
if any(
|
||||
commit.get('remote_status') == 'pushed' or commit.get('imported_from_remote') or commit.get('commit_url')
|
||||
for commit in correlation.get('commits', [])
|
||||
)
|
||||
]
|
||||
|
||||
def get_dashboard_snapshot(self, limit: int = 8) -> dict:
|
||||
"""Return DB-backed dashboard data for the UI."""
|
||||
@@ -2282,7 +2492,10 @@ class DatabaseManager:
|
||||
pass
|
||||
active_projects = self.get_all_projects()
|
||||
archived_projects = self.get_all_projects(archived_only=True)
|
||||
projects = active_projects[:limit]
|
||||
project_bundles = [self.get_project_audit_data(project.project_id) for project in active_projects[:limit]]
|
||||
archived_project_bundles = [self.get_project_audit_data(project.project_id) for project in archived_projects[:limit]]
|
||||
all_project_bundles = [self.get_project_audit_data(project.project_id) for project in active_projects]
|
||||
all_project_bundles.extend(self.get_project_audit_data(project.project_id) for project in archived_projects)
|
||||
system_logs = self.db.query(SystemLog).order_by(SystemLog.created_at.desc()).limit(limit).all()
|
||||
return {
|
||||
"summary": {
|
||||
@@ -2294,13 +2507,14 @@ class DatabaseManager:
|
||||
"prompt_events": self.db.query(AuditTrail).filter(AuditTrail.action == "PROMPT_RECEIVED").count(),
|
||||
"queued_prompts": queue_summary.get('queued', 0),
|
||||
"failed_queued_prompts": queue_summary.get('failed', 0),
|
||||
"code_changes": self.db.query(AuditTrail).filter(AuditTrail.action == "CODE_CHANGE").count(),
|
||||
"code_changes": sum(len(bundle.get('code_changes', [])) for bundle in all_project_bundles),
|
||||
"orphan_code_changes": sum(len(bundle.get('orphan_code_changes', [])) for bundle in all_project_bundles),
|
||||
"open_pull_requests": self.db.query(PullRequest).filter(PullRequest.pr_state == "open", PullRequest.merged.is_(False)).count(),
|
||||
"tracked_issues": self.db.query(AuditTrail).filter(AuditTrail.action == "REPOSITORY_ISSUE").count(),
|
||||
"issue_work_events": self.db.query(AuditTrail).filter(AuditTrail.action == "ISSUE_WORKED").count(),
|
||||
},
|
||||
"projects": [self.get_project_audit_data(project.project_id) for project in projects],
|
||||
"archived_projects": [self.get_project_audit_data(project.project_id) for project in archived_projects[:limit]],
|
||||
"projects": project_bundles,
|
||||
"archived_projects": archived_project_bundles,
|
||||
"system_logs": [
|
||||
{
|
||||
"id": log.id,
|
||||
@@ -2319,6 +2533,384 @@ class DatabaseManager:
|
||||
},
|
||||
}
|
||||
|
||||
def _build_commit_url(self, owner: str, repo_name: str, commit_hash: str) -> str | None:
|
||||
"""Build a browser commit URL from configured Gitea settings."""
|
||||
if not settings.gitea_url or not owner or not repo_name or not commit_hash:
|
||||
return None
|
||||
return f"{str(settings.gitea_url).rstrip('/')}/{owner}/{repo_name}/commit/{commit_hash}"
|
||||
|
||||
def _update_project_audit_rows_for_delivery(
|
||||
self,
|
||||
project_id: str,
|
||||
branch: str,
|
||||
owner: str,
|
||||
repo_name: str,
|
||||
code_change_ids: list[int],
|
||||
orphan_code_change_ids: list[int],
|
||||
published_commit_hashes: list[str],
|
||||
) -> None:
|
||||
"""Mark matching commit and code-change rows as remotely published."""
|
||||
commit_hashes = set(self._dedupe_preserve_order(published_commit_hashes))
|
||||
for commit_row in self.db.query(AuditTrail).filter(
|
||||
AuditTrail.project_id == project_id,
|
||||
AuditTrail.action == 'GIT_COMMIT',
|
||||
).all():
|
||||
metadata = self._normalize_metadata(commit_row.metadata_json)
|
||||
commit_hash = metadata.get('commit_hash')
|
||||
if not commit_hash or commit_hash not in commit_hashes:
|
||||
continue
|
||||
metadata['branch'] = branch
|
||||
metadata['remote_status'] = 'pushed'
|
||||
metadata['commit_url'] = self._build_commit_url(owner, repo_name, commit_hash)
|
||||
commit_row.metadata_json = metadata
|
||||
|
||||
retry_ids = set(code_change_ids)
|
||||
orphan_ids = set(orphan_code_change_ids)
|
||||
new_commit_hash = next(iter(commit_hashes), None)
|
||||
for change_row in self.db.query(AuditTrail).filter(
|
||||
AuditTrail.project_id == project_id,
|
||||
AuditTrail.action == 'CODE_CHANGE',
|
||||
).all():
|
||||
if change_row.id not in retry_ids:
|
||||
continue
|
||||
metadata = self._normalize_metadata(change_row.metadata_json)
|
||||
metadata['branch'] = branch
|
||||
metadata['remote_status'] = 'pushed'
|
||||
if change_row.id in orphan_ids and new_commit_hash:
|
||||
metadata['commit_hash'] = new_commit_hash
|
||||
change_row.metadata_json = metadata
|
||||
self.db.commit()
|
||||
|
||||
def _find_or_create_delivery_pull_request(
    self,
    history: ProjectHistory,
    gitea_api,
    owner: str,
    repo_name: str,
    branch: str,
    prompt_text: str | None,
) -> dict:
    """Return an open PR for the project branch, creating one if necessary.

    Resolution order:
        1. A PR already recorded locally for this project.
        2. An open remote PR whose head ref matches ``branch`` (adopted and
           persisted locally).
        3. A freshly created remote PR from ``branch`` into ``main``.

    Raises:
        RuntimeError: when the remote PR creation call reports an error.
    """
    existing = self.get_open_pull_request(project_id=history.project_id)
    if existing is not None:
        return existing

    remote_prs = gitea_api.list_pull_requests_sync(owner=owner, repo=repo_name, state='open')
    # The remote call may return an error dict instead of a list; only a
    # list is treated as a usable PR collection.
    if isinstance(remote_prs, list):
        for item in remote_prs:
            # 'head' may be missing or malformed; fall back to {} defensively.
            remote_head = ((item.get('head') or {}) if isinstance(item.get('head'), dict) else {})
            if remote_head.get('ref') != branch:
                continue
            # Adopt the remote PR: persist it locally so later lookups hit step 1.
            pr = self.save_pr_data(
                history.id,
                {
                    'pr_number': item.get('number') or item.get('id') or 0,
                    'title': item.get('title') or f"AI delivery for {history.project_name}",
                    'body': item.get('body') or '',
                    'state': item.get('state', 'open'),
                    'base': ((item.get('base') or {}) if isinstance(item.get('base'), dict) else {}).get('ref', 'main'),
                    'user': ((item.get('user') or {}) if isinstance(item.get('user'), dict) else {}).get('login', 'system'),
                    'pr_url': item.get('html_url') or gitea_api.build_pull_request_url(item.get('number') or item.get('id'), owner=owner, repo=repo_name),
                    'merged': bool(item.get('merged')),
                    'head': remote_head.get('ref'),
                },
            )
            return {
                'pr_number': pr.pr_number,
                'title': pr.pr_title,
                'body': pr.pr_body,
                'pr_url': pr.pr_url,
                'pr_state': pr.pr_state,
                'merged': pr.merged,
            }

    # No usable PR anywhere: create one on the remote.
    title = f"AI delivery for {history.project_name}"
    body = (
        f"Automated software factory changes for {history.project_name}.\n\n"
        f"Prompt: {prompt_text or history.description}\n\n"
        f"Branch: {branch}"
    )
    created = gitea_api.create_pull_request_sync(
        title=title,
        body=body,
        owner=owner,
        repo=repo_name,
        base='main',
        head=branch,
    )
    if created.get('error'):
        raise RuntimeError(f"Unable to create pull request: {created.get('error')}")
    pr = self.save_pr_data(
        history.id,
        {
            'pr_number': created.get('number') or created.get('id') or 0,
            'title': created.get('title', title),
            'body': created.get('body', body),
            'state': created.get('state', 'open'),
            'base': ((created.get('base') or {}) if isinstance(created.get('base'), dict) else {}).get('ref', 'main'),
            'user': ((created.get('user') or {}) if isinstance(created.get('user'), dict) else {}).get('login', 'system'),
            'pr_url': created.get('html_url') or gitea_api.build_pull_request_url(created.get('number') or created.get('id'), owner=owner, repo=repo_name),
            'merged': bool(created.get('merged')),
            'head': branch,
        },
    )
    return {
        'pr_number': pr.pr_number,
        'title': pr.pr_title,
        'body': pr.pr_body,
        'pr_url': pr.pr_url,
        'pr_state': pr.pr_state,
        'merged': pr.merged,
    }
|
||||
|
||||
def retry_project_delivery(self, project_id: str) -> dict:
    """Retry remote delivery for orphaned, local-only, or missing-PR project changes.

    Walks the full delivery pipeline again: validate project/repository
    configuration, re-commit any orphaned generated files, push the delivery
    branch, ensure a pull request exists, then reconcile audit rows and the
    UI snapshot. Every failure path returns a ``{'status': 'error', ...}``
    dict rather than raising.

    Args:
        project_id: The project whose failed delivery should be retried.

    Returns:
        A status dict; on success it includes 'branch', 'commit_hashes' and
        'pull_request'.
    """
    history = self.get_project_by_id(project_id)
    if history is None:
        return {'status': 'error', 'message': 'Project not found'}

    audit_data = self.get_project_audit_data(project_id)
    project = audit_data.get('project') or {}
    delivery_status = project.get('delivery_status')
    # Only these three states represent a recoverable failed delivery;
    # anything else is treated as "nothing to do", not an error.
    if delivery_status not in {'uncommitted', 'local_only', 'pushed_no_pr'}:
        return {'status': 'success', 'message': 'No failed delivery state was found for this project.', 'project_id': project_id}

    snapshot_data = self._get_latest_ui_snapshot_data(history.id)
    repository = self._get_project_repository(history) or {}
    if repository.get('mode') != 'project':
        return {'status': 'error', 'message': 'Only project-scoped repositories support delivery retry.', 'project_id': project_id}
    owner = repository.get('owner') or settings.gitea_owner
    repo_name = repository.get('name') or settings.gitea_repo
    if not owner or not repo_name or not settings.gitea_url or not settings.gitea_token:
        return {'status': 'error', 'message': 'Gitea repository settings are incomplete; cannot retry delivery.', 'project_id': project_id}

    # Prefer the workspace path recorded in the snapshot; fall back to the
    # conventional location under projects_root.
    project_root = Path(snapshot_data.get('project_root') or (settings.projects_root / project_id)).expanduser().resolve()
    if not project_root.exists():
        return {'status': 'error', 'message': f'Project workspace does not exist at {project_root}', 'project_id': project_id}

    # Deferred import mirrors the package/flat-layout fallback used at module top.
    try:
        from .git_manager import GitManager
        from .gitea import GiteaAPI
    except ImportError:
        from agents.git_manager import GitManager
        from agents.gitea import GiteaAPI

    git_manager = GitManager(project_id=project_id, project_dir=str(project_root))
    if not git_manager.is_git_available():
        return {'status': 'error', 'message': 'git executable is not available in PATH', 'project_id': project_id}
    if not git_manager.has_repo():
        return {'status': 'error', 'message': 'Local git repository is missing; cannot retry delivery safely.', 'project_id': project_id}

    commits = audit_data.get('commits', [])
    local_only_changes = audit_data.get('local_only_code_changes', [])
    orphan_changes = audit_data.get('orphan_code_changes', [])
    # Commits already known (or believed) to exist on the remote.
    published_commits = [
        commit for commit in commits
        if commit.get('remote_status') == 'pushed' or commit.get('imported_from_remote') or commit.get('commit_url')
    ]
    # Candidate branch names in priority order; the trailing f'ai/{project_id}'
    # guarantees at least one usable entry. Assumes _dedupe_preserve_order
    # drops None/empty candidates — TODO confirm.
    branch_candidates = [
        *(change.get('branch') for change in local_only_changes),
        *(change.get('branch') for change in orphan_changes),
        *(commit.get('branch') for commit in commits),
        ((snapshot_data.get('git') or {}).get('active_branch') if isinstance(snapshot_data.get('git'), dict) else None),
        f'ai/{project_id}',
    ]
    branch = self._dedupe_preserve_order(branch_candidates)[0]
    head = git_manager.current_head_or_none()
    if head is None:
        return {'status': 'error', 'message': 'Local repository has no commits; retry delivery cannot determine a safe base commit.', 'project_id': project_id}
    if git_manager.branch_exists(branch):
        git_manager.checkout_branch(branch)
    else:
        git_manager.checkout_branch(branch, create=True, start_point=head)

    code_change_ids = [change['id'] for change in local_only_changes] + [change['id'] for change in orphan_changes]
    orphan_ids = [change['id'] for change in orphan_changes]
    published_commit_hashes = [commit.get('commit_hash') for commit in published_commits if commit.get('commit_hash')]

    if orphan_changes:
        # Orphans were generated but never committed: commit them now.
        files_to_commit = self._dedupe_preserve_order([change.get('file_path') for change in orphan_changes])
        missing_files = [path for path in files_to_commit if not (project_root / path).exists()]
        if missing_files:
            return {
                'status': 'error',
                'message': f"Cannot retry delivery because generated files are missing locally: {', '.join(missing_files)}",
                'project_id': project_id,
            }
        git_manager.add_files(files_to_commit)
        # An empty status after staging means the files match HEAD already.
        if not git_manager.get_status():
            return {
                'status': 'error',
                'message': 'No local git changes remain for the orphaned files; purge them or regenerate the project.',
                'project_id': project_id,
            }
        commit_message = f"Retry AI delivery for prompt: {history.project_name}"
        retried_commit_hash = git_manager.commit(commit_message)
        # Use the newest prompt id among the orphans for attribution.
        prompt_id = max((change.get('prompt_id') for change in orphan_changes if change.get('prompt_id') is not None), default=None)
        self.log_commit(
            project_id=project_id,
            commit_message=commit_message,
            actor='dashboard',
            actor_type='operator',
            history_id=history.id,
            prompt_id=prompt_id,
            commit_hash=retried_commit_hash,
            changed_files=files_to_commit,
            branch=branch,
            remote_status='local-only',
        )
        published_commit_hashes.append(retried_commit_hash)

    gitea_api = GiteaAPI(token=settings.gitea_token, base_url=settings.gitea_url, owner=owner, repo=repo_name)
    # Authenticate early so push/PR failures below aren't credential issues.
    user = gitea_api.get_current_user_sync()
    if user.get('error'):
        return {'status': 'error', 'message': f"Unable to authenticate with Gitea: {user.get('error')}", 'project_id': project_id}
    clone_url = repository.get('clone_url') or gitea_api.build_repo_git_url(owner=owner, repo=repo_name)
    if not clone_url:
        return {'status': 'error', 'message': 'Repository clone URL could not be determined for retry delivery.', 'project_id': project_id}

    try:
        git_manager.push_with_credentials(
            remote_url=clone_url,
            username=user.get('login') or 'git',
            password=settings.gitea_token,
            remote='origin',
            branch=branch,
        )
    except Exception as exc:
        self.log_system_event(component='git', level='ERROR', message=f'Retry delivery push failed for {project_id}: {exc}')
        return {'status': 'error', 'message': f'Remote git push failed: {exc}', 'project_id': project_id}

    # If nothing was recorded as published, fall back to the local HEAD,
    # which the push above has just made remote.
    if not published_commit_hashes:
        head_commit = git_manager.current_head_or_none()
        if head_commit:
            published_commit_hashes.append(head_commit)

    prompt_text = (audit_data.get('prompts') or [{}])[0].get('prompt_text') if audit_data.get('prompts') else None
    try:
        pull_request = self._find_or_create_delivery_pull_request(history, gitea_api, owner, repo_name, branch, prompt_text)
    except Exception as exc:
        self.log_system_event(component='gitea', level='ERROR', message=f'Retry delivery PR creation failed for {project_id}: {exc}')
        return {'status': 'error', 'message': str(exc), 'project_id': project_id}

    # Reconcile audit rows now that the push and PR both succeeded.
    self._update_project_audit_rows_for_delivery(
        project_id=project_id,
        branch=branch,
        owner=owner,
        repo_name=repo_name,
        code_change_ids=code_change_ids,
        orphan_code_change_ids=orphan_ids,
        published_commit_hashes=published_commit_hashes,
    )

    # Rebuild the UI snapshot with the fresh git/remote/PR state; shallow
    # copies keep the previous snapshot object untouched.
    refreshed_snapshot = dict(snapshot_data)
    refreshed_git = dict(refreshed_snapshot.get('git') or {})
    latest_commit_hash = self._dedupe_preserve_order(published_commit_hashes)[-1]
    latest_commit = dict(refreshed_git.get('latest_commit') or {})
    latest_commit.update(
        {
            'hash': latest_commit_hash,
            'scope': 'remote',
            'branch': branch,
            'commit_url': gitea_api.build_commit_url(latest_commit_hash, owner=owner, repo=repo_name),
        }
    )
    refreshed_git['latest_commit'] = latest_commit
    refreshed_git['active_branch'] = branch
    refreshed_git['remote_error'] = None
    refreshed_git['remote_push'] = {
        'status': 'pushed',
        'remote': clone_url,
        'branch': branch,
        'commit_url': latest_commit.get('commit_url'),
        'pull_request': pull_request,
    }
    refreshed_snapshot['git'] = refreshed_git
    refreshed_repository = dict(repository)
    refreshed_repository['last_commit_url'] = latest_commit.get('commit_url')
    refreshed_snapshot['repository'] = refreshed_repository
    refreshed_snapshot['pull_request'] = pull_request
    refreshed_snapshot['project_root'] = str(project_root)
    self.save_ui_snapshot(history.id, refreshed_snapshot)
    self._log_audit_trail(
        project_id=project_id,
        action='DELIVERY_RETRIED',
        actor='dashboard',
        action_type='RETRY',
        details=f'Retried remote delivery for branch {branch}',
        message='Remote delivery retried successfully',
        metadata_json={
            'history_id': history.id,
            'branch': branch,
            'commit_hashes': self._dedupe_preserve_order(published_commit_hashes),
            'pull_request': pull_request,
        },
    )
    self.log_system_event(component='git', level='INFO', message=f'Retried remote delivery for {project_id} on {branch}')
    return {
        'status': 'success',
        'message': 'Remote delivery retried successfully.',
        'project_id': project_id,
        'branch': branch,
        'commit_hashes': self._dedupe_preserve_order(published_commit_hashes),
        'pull_request': pull_request,
    }
|
||||
|
||||
def cleanup_orphan_code_changes(self, project_id: str | None = None) -> dict:
    """Delete code change rows that cannot be tied to any recorded commit.

    Args:
        project_id: Restrict the cleanup to one project; all projects when None.

    Returns:
        A summary dict with 'status', 'deleted_count', 'project_count',
        'projects', 'project_id', and a human-readable 'message'.

    Side effects:
        Deletes orphaned AuditTrail CODE_CHANGE rows plus their
        PromptCodeLink references and commits the session.
    """
    change_query = self.db.query(AuditTrail).filter(AuditTrail.action == 'CODE_CHANGE')
    commit_query = self.db.query(AuditTrail).filter(AuditTrail.action == 'GIT_COMMIT')
    if project_id:
        change_query = change_query.filter(AuditTrail.project_id == project_id)
        commit_query = commit_query.filter(AuditTrail.project_id == project_id)

    change_rows = change_query.all()
    commit_rows = commit_query.all()
    # Normalize each row's metadata once instead of once per extracted field.
    commits = []
    for commit in commit_rows:
        metadata = self._normalize_metadata(commit.metadata_json)
        commits.append(
            {
                'commit_hash': metadata.get('commit_hash'),
                'prompt_id': metadata.get('prompt_id'),
            }
        )
    raw_code_changes = []
    for change in change_rows:
        metadata = self._normalize_metadata(change.metadata_json)
        raw_code_changes.append(
            {
                'id': change.id,
                'project_id': change.project_id,
                'prompt_id': metadata.get('prompt_id'),
                'commit_hash': metadata.get('commit_hash'),
            }
        )
    # Only the orphaned partition matters here; linked/local-only are kept.
    _, _, orphaned_changes = self._partition_code_changes(raw_code_changes, commits)
    orphan_ids = [change['id'] for change in orphaned_changes]
    orphan_projects = sorted({change['project_id'] for change in orphaned_changes if change.get('project_id')})

    if orphan_ids:
        # Delete link rows first so no dangling references survive.
        self.db.query(PromptCodeLink).filter(PromptCodeLink.code_change_audit_id.in_(orphan_ids)).delete(synchronize_session=False)
        self.db.query(AuditTrail).filter(AuditTrail.id.in_(orphan_ids)).delete(synchronize_session=False)
        self.db.commit()
        self.log_system_event(
            component='audit',
            level='INFO',
            message=(
                f"Purged {len(orphan_ids)} orphaned code change audit row(s)"
                + (f" for project {project_id}" if project_id else '')
            ),
        )

    return {
        'status': 'success',
        'deleted_count': len(orphan_ids),
        'project_count': len(orphan_projects),
        'projects': orphan_projects,
        'project_id': project_id,
        'message': (
            f"Purged {len(orphan_ids)} orphaned code change row(s)."
            if orphan_ids else 'No orphaned code change rows were found.'
        ),
    }
|
||||
|
||||
def cleanup_audit_trail(self) -> None:
|
||||
"""Clear audit-related test data across all related tables."""
|
||||
self.db.query(PromptCodeLink).delete()
|
||||
|
||||
@@ -58,6 +58,18 @@ class GiteaAPI:
|
||||
"""Build a Gitea API URL from a relative path."""
|
||||
return f"{self.base_url}/api/v1/{path.lstrip('/')}"
|
||||
|
||||
def _normalize_pull_request_head(self, head: str | None, owner: str | None = None) -> str | None:
|
||||
"""Return a Gitea-compatible head ref for pull request creation."""
|
||||
normalized = (head or '').strip()
|
||||
if not normalized:
|
||||
return None
|
||||
if ':' in normalized:
|
||||
return normalized
|
||||
effective_owner = (owner or self.owner or '').strip()
|
||||
if not effective_owner:
|
||||
return normalized
|
||||
return f"{effective_owner}:{normalized}"
|
||||
|
||||
def build_repo_git_url(self, owner: str | None = None, repo: str | None = None) -> str | None:
|
||||
"""Build the clone URL for a repository."""
|
||||
_owner = owner or self.owner
|
||||
@@ -222,14 +234,36 @@ class GiteaAPI:
|
||||
"""Create a pull request."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
normalized_head = self._normalize_pull_request_head(head, _owner)
|
||||
payload = {
|
||||
"title": title,
|
||||
"body": body,
|
||||
"base": base,
|
||||
"head": head or f"{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
"head": normalized_head or f"{_owner}:{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
}
|
||||
return await self._request("POST", f"repos/{_owner}/{_repo}/pulls", payload)
|
||||
|
||||
def create_pull_request_sync(
|
||||
self,
|
||||
title: str,
|
||||
body: str,
|
||||
owner: str,
|
||||
repo: str,
|
||||
base: str = "main",
|
||||
head: str | None = None,
|
||||
) -> dict:
|
||||
"""Synchronously create a pull request."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
normalized_head = self._normalize_pull_request_head(head, _owner)
|
||||
payload = {
|
||||
"title": title,
|
||||
"body": body,
|
||||
"base": base,
|
||||
"head": normalized_head or f"{_owner}:{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
}
|
||||
return self._request_sync("POST", f"repos/{_owner}/{_repo}/pulls", payload)
|
||||
|
||||
async def list_pull_requests(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
@@ -402,3 +436,13 @@ class GiteaAPI:
|
||||
return {"error": "Repository name required for org operations"}
|
||||
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}")
|
||||
|
||||
def get_repo_info_sync(self, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Synchronously get repository information."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
|
||||
if not _repo:
|
||||
return {"error": "Repository name required for org operations"}
|
||||
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}")
|
||||
@@ -3,9 +3,11 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import difflib
|
||||
import json
|
||||
import py_compile
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import PurePosixPath
|
||||
from typing import Optional
|
||||
from datetime import datetime
|
||||
|
||||
@@ -14,18 +16,27 @@ try:
|
||||
from .database_manager import DatabaseManager
|
||||
from .git_manager import GitManager
|
||||
from .gitea import GiteaAPI
|
||||
from .llm_service import LLMServiceClient
|
||||
from .ui_manager import UIManager
|
||||
except ImportError:
|
||||
from config import settings
|
||||
from agents.database_manager import DatabaseManager
|
||||
from agents.git_manager import GitManager
|
||||
from agents.gitea import GiteaAPI
|
||||
from agents.llm_service import LLMServiceClient
|
||||
from agents.ui_manager import UIManager
|
||||
|
||||
|
||||
class AgentOrchestrator:
|
||||
"""Orchestrates the software generation process with full audit trail."""
|
||||
|
||||
REMOTE_READY_REPOSITORY_MODES = {'project', 'onboarded'}
|
||||
REMOTE_READY_REPOSITORY_STATUSES = {'created', 'exists', 'ready', 'onboarded'}
|
||||
GENERATED_TEXT_FILE_SUFFIXES = {'.py', '.md', '.txt', '.toml', '.yaml', '.yml', '.json', '.ini', '.cfg', '.sh', '.html', '.css', '.js', '.ts'}
|
||||
GENERATED_TEXT_FILE_NAMES = {'README', 'README.md', '.gitignore', 'requirements.txt', 'pyproject.toml', 'Dockerfile', 'Containerfile', 'Makefile'}
|
||||
MAX_WORKSPACE_CONTEXT_FILES = 20
|
||||
MAX_WORKSPACE_CONTEXT_CHARS = 24000
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
project_id: str,
|
||||
@@ -62,6 +73,7 @@ class AgentOrchestrator:
|
||||
self.repo_name_override = repo_name_override
|
||||
self.existing_history = existing_history
|
||||
self.changed_files: list[str] = []
|
||||
self.pending_code_changes: list[dict] = []
|
||||
self.gitea_api = GiteaAPI(
|
||||
token=settings.GITEA_TOKEN,
|
||||
base_url=settings.GITEA_URL,
|
||||
@@ -76,6 +88,7 @@ class AgentOrchestrator:
|
||||
self.branch_name = self._build_pr_branch_name(project_id)
|
||||
self.active_pull_request = None
|
||||
self._gitea_username: str | None = None
|
||||
existing_repository: dict | None = None
|
||||
hinted_issue_number = (related_issue_hint or {}).get('number') if related_issue_hint else None
|
||||
self.related_issue_number = hinted_issue_number if hinted_issue_number is not None else self._extract_issue_number(prompt_text)
|
||||
self.related_issue: dict | None = DatabaseManager._normalize_issue(related_issue_hint)
|
||||
@@ -106,9 +119,12 @@ class AgentOrchestrator:
|
||||
latest_ui = self.db_manager._get_latest_ui_snapshot_data(self.history.id)
|
||||
repository = latest_ui.get('repository') if isinstance(latest_ui, dict) else None
|
||||
if isinstance(repository, dict) and repository:
|
||||
existing_repository = dict(repository)
|
||||
self.repo_owner = repository.get('owner') or self.repo_owner
|
||||
self.repo_name = repository.get('name') or self.repo_name
|
||||
self.repo_url = repository.get('url') or self.repo_url
|
||||
git_state = latest_ui.get('git') if isinstance(latest_ui.get('git'), dict) else {}
|
||||
self.branch_name = git_state.get('active_branch') or self.branch_name
|
||||
if self.prompt_text:
|
||||
self.prompt_audit = self.db_manager.log_prompt_submission(
|
||||
history_id=self.history.id,
|
||||
@@ -117,6 +133,7 @@ class AgentOrchestrator:
|
||||
features=self.features,
|
||||
tech_stack=self.tech_stack,
|
||||
actor_name=self.prompt_actor,
|
||||
source=self.prompt_actor,
|
||||
related_issue={'number': self.related_issue_number} if self.related_issue_number is not None else None,
|
||||
source_context=self.prompt_source_context,
|
||||
routing=self.prompt_routing,
|
||||
@@ -125,18 +142,60 @@ class AgentOrchestrator:
|
||||
self.ui_manager.ui_data["project_root"] = str(self.project_root)
|
||||
self.ui_manager.ui_data["features"] = list(self.features)
|
||||
self.ui_manager.ui_data["tech_stack"] = list(self.tech_stack)
|
||||
self.ui_manager.ui_data["repository"] = {
|
||||
repository_ui = {
|
||||
"owner": self.repo_owner,
|
||||
"name": self.repo_name,
|
||||
"mode": "project" if settings.use_project_repositories else "shared",
|
||||
"status": "pending" if settings.use_project_repositories else "shared",
|
||||
"provider": "gitea",
|
||||
}
|
||||
if existing_repository:
|
||||
repository_ui.update(existing_repository)
|
||||
self.ui_manager.ui_data["repository"] = repository_ui
|
||||
if self.related_issue:
|
||||
self.ui_manager.ui_data["related_issue"] = self.related_issue
|
||||
if self.active_pull_request:
|
||||
self.ui_manager.ui_data["pull_request"] = self.active_pull_request
|
||||
|
||||
def _repository_supports_remote_delivery(self, repository: dict | None = None) -> bool:
|
||||
"""Return whether repository metadata supports git push and PR delivery."""
|
||||
repo = repository or self.ui_manager.ui_data.get('repository') or {}
|
||||
return repo.get('mode') in self.REMOTE_READY_REPOSITORY_MODES and repo.get('status') in self.REMOTE_READY_REPOSITORY_STATUSES
|
||||
|
||||
def _static_files(self) -> dict[str, str]:
|
||||
"""Files that do not need prompt-specific generation."""
|
||||
return {
|
||||
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
||||
}
|
||||
|
||||
def _fallback_generated_files(self) -> dict[str, str]:
|
||||
"""Deterministic fallback files when LLM generation is unavailable."""
|
||||
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
||||
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
||||
return {
|
||||
"README.md": (
|
||||
f"# {self.project_name}\n\n"
|
||||
f"{self.description}\n\n"
|
||||
"## Features\n"
|
||||
f"{feature_section}\n\n"
|
||||
"## Tech Stack\n"
|
||||
f"{tech_section}\n"
|
||||
),
|
||||
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
||||
"main.py": (
|
||||
"from fastapi import FastAPI\n\n"
|
||||
"app = FastAPI(title=\"Generated App\")\n\n"
|
||||
"@app.get('/')\n"
|
||||
"def read_root():\n"
|
||||
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
||||
),
|
||||
"tests/test_app.py": (
|
||||
"from main import read_root\n\n"
|
||||
"def test_read_root():\n"
|
||||
f" assert read_root()['name'] == '{self.project_name}'\n"
|
||||
),
|
||||
}
|
||||
|
||||
def _build_pr_branch_name(self, project_id: str) -> str:
|
||||
"""Build a stable branch name used until the PR is merged."""
|
||||
return f"ai/{project_id}"
|
||||
@@ -157,7 +216,7 @@ class AgentOrchestrator:
|
||||
"""Persist the current generation plan as an inspectable trace."""
|
||||
if not self.db_manager or not self.history or not self.prompt_audit:
|
||||
return
|
||||
planned_files = list(self._template_files().keys())
|
||||
planned_files = list(self._static_files().keys()) + list(self._fallback_generated_files().keys())
|
||||
self.db_manager.log_llm_trace(
|
||||
project_id=self.project_id,
|
||||
history_id=self.history.id,
|
||||
@@ -187,6 +246,175 @@ class AgentOrchestrator:
|
||||
fallback_used=False,
|
||||
)
|
||||
|
||||
def _is_safe_relative_path(self, path: str) -> bool:
|
||||
"""Return whether a generated file path is safe to write under the project root."""
|
||||
normalized = str(PurePosixPath((path or '').strip()))
|
||||
if not normalized or normalized in {'.', '..'}:
|
||||
return False
|
||||
if normalized.startswith('/') or normalized.startswith('../') or '/../' in normalized:
|
||||
return False
|
||||
if normalized.startswith('.git/'):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _is_supported_generated_text_file(self, path: str) -> bool:
|
||||
"""Return whether the generated path is a supported text artifact."""
|
||||
normalized = PurePosixPath(path)
|
||||
if normalized.name in self.GENERATED_TEXT_FILE_NAMES:
|
||||
return True
|
||||
return normalized.suffix.lower() in self.GENERATED_TEXT_FILE_SUFFIXES
|
||||
|
||||
def _collect_workspace_context(self) -> dict:
    """Collect a compact, text-only snapshot of the current project workspace.

    Returns:
        A dict with:
        - 'has_existing_files': True when at least one file was captured.
        - 'files': list of {'path', 'content', 'truncated'} entries, capped
          at MAX_WORKSPACE_CONTEXT_FILES entries and
          MAX_WORKSPACE_CONTEXT_CHARS total characters across all snippets.
    """
    if not self.project_root.exists():
        return {'has_existing_files': False, 'files': []}

    files: list[dict] = []
    total_chars = 0
    # sorted() gives a deterministic walk order, so the snapshot is stable
    # for a given workspace state.
    for path in sorted(self.project_root.rglob('*')):
        if not path.is_file():
            continue
        relative_path = path.relative_to(self.project_root).as_posix()
        # .gitignore is static boilerplate; it adds no useful LLM context.
        if relative_path == '.gitignore':
            continue
        if not self._is_safe_relative_path(relative_path) or not self._is_supported_generated_text_file(relative_path):
            continue
        try:
            content = path.read_text(encoding='utf-8')
        except (UnicodeDecodeError, OSError):
            # Skip unreadable or non-UTF-8 files instead of failing the run.
            continue
        # Enforce the global character budget; once exhausted, stop entirely.
        remaining_chars = self.MAX_WORKSPACE_CONTEXT_CHARS - total_chars
        if remaining_chars <= 0:
            break
        snippet = content[:remaining_chars]
        files.append(
            {
                'path': relative_path,
                'content': snippet,
                # True when the budget cut this file short.
                'truncated': len(snippet) < len(content),
            }
        )
        total_chars += len(snippet)
        if len(files) >= self.MAX_WORKSPACE_CONTEXT_FILES:
            break
    return {'has_existing_files': bool(files), 'files': files}
|
||||
|
||||
def _parse_generated_files(self, content: str | None) -> dict[str, str]:
|
||||
"""Parse an LLM file bundle response into relative-path/content pairs."""
|
||||
if not content:
|
||||
return {}
|
||||
try:
|
||||
parsed = json.loads(content)
|
||||
except Exception:
|
||||
return {}
|
||||
generated: dict[str, str] = {}
|
||||
if isinstance(parsed, dict) and isinstance(parsed.get('files'), list):
|
||||
for item in parsed['files']:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
path = str(item.get('path') or '').strip()
|
||||
file_content = item.get('content')
|
||||
if (
|
||||
self._is_safe_relative_path(path)
|
||||
and self._is_supported_generated_text_file(path)
|
||||
and isinstance(file_content, str)
|
||||
and file_content.strip()
|
||||
):
|
||||
generated[path] = file_content.rstrip() + "\n"
|
||||
elif isinstance(parsed, dict):
|
||||
for path, file_content in parsed.items():
|
||||
normalized_path = str(path).strip()
|
||||
if (
|
||||
self._is_safe_relative_path(normalized_path)
|
||||
and self._is_supported_generated_text_file(normalized_path)
|
||||
and isinstance(file_content, str)
|
||||
and file_content.strip()
|
||||
):
|
||||
generated[normalized_path] = file_content.rstrip() + "\n"
|
||||
return generated
|
||||
|
||||
async def _generate_prompt_driven_files(self) -> tuple[dict[str, str], dict | None, bool]:
    """Use the configured LLM to generate prompt-specific project files.

    Returns:
        (files, trace, modified_existing): ``files`` maps relative paths to
        contents; ``trace`` is the LLM trace payload from chat_with_trace;
        ``modified_existing`` is True when the workspace already had files
        and the LLM was asked to edit them instead of bootstrapping.
    """
    fallback_files = self._fallback_generated_files()
    workspace_context = self._collect_workspace_context()
    has_existing_files = bool(workspace_context.get('has_existing_files'))
    if has_existing_files:
        # Modification mode: steer the model toward minimal edits of the
        # existing repository rather than regenerating a starter app.
        system_prompt = (
            'You modify an existing software repository. '
            'Return only JSON. Update the smallest necessary set of files to satisfy the new prompt. '
            'Prefer editing existing files over inventing a new starter app. '
            'Only return files that should be written. Omit unchanged files. '
            'Use repository-relative paths and do not wrap the JSON in markdown fences.'
        )
        user_prompt = (
            f"Project name: {self.project_name}\n"
            f"Description: {self.description}\n"
            f"Original prompt: {self.prompt_text or self.description}\n"
            f"Requested features: {json.dumps(self.features)}\n"
            f"Preferred tech stack: {json.dumps(self.tech_stack)}\n"
            f"Related issue: {json.dumps(self.related_issue) if self.related_issue else 'null'}\n\n"
            f"Current workspace snapshot:\n{json.dumps(workspace_context['files'], indent=2)}\n\n"
            'Return JSON shaped as {"files": [{"path": "relative/path.py", "content": "..."}, ...]}. '
            'Each file path must be relative to the repository root.'
        )
    else:
        # Bootstrap mode: generate a compact starter project from scratch.
        system_prompt = (
            'You generate small but concrete starter projects. '
            'Return only JSON. Provide production-like but compact code that directly reflects the user request. '
            'Include the files README.md, requirements.txt, main.py, and tests/test_app.py. '
            'Use FastAPI for Python web requests unless the prompt clearly demands something else. '
            'The test must verify a real behavior from main.py. '
            'Do not wrap the JSON in markdown fences.'
        )
        user_prompt = (
            f"Project name: {self.project_name}\n"
            f"Description: {self.description}\n"
            f"Original prompt: {self.prompt_text or self.description}\n"
            f"Requested features: {json.dumps(self.features)}\n"
            f"Preferred tech stack: {json.dumps(self.tech_stack)}\n"
            f"Related issue: {json.dumps(self.related_issue) if self.related_issue else 'null'}\n\n"
            'Return JSON shaped as {"files": [{"path": "README.md", "content": "..."}, ...]}. '
            'At minimum include README.md, requirements.txt, main.py, and tests/test_app.py.'
        )
    content, trace = await LLMServiceClient().chat_with_trace(
        stage='generation_plan',
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        tool_context_input={
            'project_id': self.project_id,
            'project_name': self.project_name,
            'repository': self.ui_manager.ui_data.get('repository'),
            'related_issue': self.related_issue,
            'workspace_files': workspace_context.get('files', []),
        },
        expect_json=True,
    )
    # Record what the model proposed vs. what survived validation, both in
    # the UI payload and in the logs, for post-hoc debugging.
    raw_generated_paths = self._extract_raw_generated_paths(content)
    generated_files = self._parse_generated_files(content)
    accepted_paths = list(generated_files.keys())
    rejected_paths = [path for path in raw_generated_paths if path not in accepted_paths]
    generation_debug = {
        'raw_paths': raw_generated_paths,
        'accepted_paths': accepted_paths,
        'rejected_paths': rejected_paths,
        'existing_workspace': has_existing_files,
    }
    self.ui_manager.ui_data['generation_debug'] = generation_debug
    self._append_log(
        'LLM returned file candidates: '
        f"raw={raw_generated_paths or []}; accepted={accepted_paths or []}; rejected={rejected_paths or []}."
    )
    self._log_system_debug(
        'generation',
        'LLM file candidates '
        f"raw={raw_generated_paths or []}; accepted={accepted_paths or []}; rejected={rejected_paths or []}; "
        f"existing_workspace={has_existing_files}",
    )
    if has_existing_files:
        # Never merge fallback boilerplate over an existing workspace.
        return generated_files, trace, True
    # Fresh workspace: fallback files fill any gaps the LLM left; LLM
    # output wins on conflicts.
    merged_files = {**fallback_files, **generated_files}
    return merged_files, trace, False
|
||||
|
||||
async def _sync_issue_context(self) -> None:
|
||||
"""Sync repository issues and resolve a linked issue from the prompt when present."""
|
||||
if not self.db_manager or not self.history:
|
||||
@@ -211,6 +439,14 @@ class AgentOrchestrator:
|
||||
self.db_manager.attach_issue_to_prompt(self.prompt_audit.id, self.related_issue)
|
||||
|
||||
async def _ensure_remote_repository(self) -> None:
|
||||
repository = self.ui_manager.ui_data.get("repository") or {}
|
||||
if self._repository_supports_remote_delivery(repository):
|
||||
repository.setdefault("provider", "gitea")
|
||||
repository.setdefault("status", "ready")
|
||||
if repository.get("url"):
|
||||
self.repo_url = repository.get("url")
|
||||
self.ui_manager.ui_data["repository"] = repository
|
||||
return
|
||||
if not settings.use_project_repositories:
|
||||
self.ui_manager.ui_data["repository"]["status"] = "shared"
|
||||
if settings.gitea_repo:
|
||||
@@ -302,9 +538,7 @@ class AgentOrchestrator:
|
||||
async def _push_branch(self, branch: str) -> dict | None:
|
||||
"""Push a branch to the configured project repository when available."""
|
||||
repository = self.ui_manager.ui_data.get('repository') or {}
|
||||
if repository.get('mode') != 'project':
|
||||
return None
|
||||
if repository.get('status') not in {'created', 'exists', 'ready'}:
|
||||
if not self._repository_supports_remote_delivery(repository):
|
||||
return None
|
||||
if not settings.gitea_token or not self.repo_owner or not self.repo_name:
|
||||
return None
|
||||
@@ -351,7 +585,7 @@ class AgentOrchestrator:
|
||||
self.ui_manager.ui_data['pull_request'] = self.active_pull_request
|
||||
return self.active_pull_request
|
||||
repository = self.ui_manager.ui_data.get('repository') or {}
|
||||
if repository.get('mode') != 'project' or repository.get('status') not in {'created', 'exists', 'ready'}:
|
||||
if not self._repository_supports_remote_delivery(repository):
|
||||
return None
|
||||
|
||||
title = f"AI delivery for {self.project_name}"
|
||||
@@ -360,6 +594,16 @@ class AgentOrchestrator:
|
||||
f"Prompt: {self.prompt_text or self.description}\n\n"
|
||||
f"Branch: {self.branch_name}"
|
||||
)
|
||||
pull_request_debug = self.ui_manager.ui_data.setdefault('git', {}).setdefault('pull_request_debug', {})
|
||||
pull_request_request = {
|
||||
'owner': self.repo_owner,
|
||||
'repo': self.repo_name,
|
||||
'title': title,
|
||||
'body': body,
|
||||
'base': 'main',
|
||||
'head': self.gitea_api._normalize_pull_request_head(self.branch_name, self.repo_owner) or self.branch_name,
|
||||
}
|
||||
pull_request_debug['request'] = pull_request_request
|
||||
result = await self.gitea_api.create_pull_request(
|
||||
title=title,
|
||||
body=body,
|
||||
@@ -368,7 +612,9 @@ class AgentOrchestrator:
|
||||
base='main',
|
||||
head=self.branch_name,
|
||||
)
|
||||
pull_request_debug['response'] = result
|
||||
if result.get('error'):
|
||||
pull_request_debug['status'] = 'error'
|
||||
raise RuntimeError(f"Unable to create pull request: {result.get('error')}")
|
||||
|
||||
pr_number = result.get('number') or result.get('id') or 0
|
||||
@@ -383,6 +629,8 @@ class AgentOrchestrator:
|
||||
'merged': bool(result.get('merged')),
|
||||
'pr_state': result.get('state', 'open'),
|
||||
}
|
||||
pull_request_debug['status'] = 'created'
|
||||
pull_request_debug['resolved'] = pr_data
|
||||
if self.db_manager and self.history:
|
||||
self.db_manager.save_pr_data(self.history.id, pr_data)
|
||||
self.active_pull_request = self.db_manager.get_open_pull_request(project_id=self.project_id) if self.db_manager else pr_data
|
||||
@@ -392,9 +640,7 @@ class AgentOrchestrator:
|
||||
async def _push_remote_commit(self, commit_hash: str, commit_message: str, changed_files: list[str], base_commit: str | None) -> dict | None:
|
||||
"""Push the local commit to the provisioned Gitea repository and build browser links."""
|
||||
repository = self.ui_manager.ui_data.get("repository") or {}
|
||||
if repository.get("mode") != "project":
|
||||
return None
|
||||
if repository.get("status") not in {"created", "exists", "ready"}:
|
||||
if not self._repository_supports_remote_delivery(repository):
|
||||
return None
|
||||
push_result = await self._push_branch(self.branch_name)
|
||||
if push_result is None:
|
||||
@@ -436,6 +682,35 @@ class AgentOrchestrator:
|
||||
if self.db_manager and self.history:
|
||||
self.db_manager._log_action(self.history.id, "INFO", message)
|
||||
|
||||
def _log_system_debug(self, component: str, message: str, level: str = 'INFO') -> None:
|
||||
"""Persist a system-level debug breadcrumb for generation and git decisions."""
|
||||
if not self.db_manager:
|
||||
return
|
||||
self.db_manager.log_system_event(component=component, level=level, message=f"{self.project_id}: {message}")
|
||||
|
||||
def _extract_raw_generated_paths(self, content: str | None) -> list[str]:
|
||||
"""Return all file paths proposed by the LLM response before safety filtering."""
|
||||
if not content:
|
||||
return []
|
||||
try:
|
||||
parsed = json.loads(content)
|
||||
except Exception:
|
||||
return []
|
||||
raw_paths: list[str] = []
|
||||
if isinstance(parsed, dict) and isinstance(parsed.get('files'), list):
|
||||
for item in parsed['files']:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
path = str(item.get('path') or '').strip()
|
||||
if path:
|
||||
raw_paths.append(path)
|
||||
elif isinstance(parsed, dict):
|
||||
for path in parsed.keys():
|
||||
normalized_path = str(path).strip()
|
||||
if normalized_path:
|
||||
raw_paths.append(normalized_path)
|
||||
return raw_paths
|
||||
|
||||
def _update_progress(self, progress: int, step: str, message: str) -> None:
|
||||
self.progress = progress
|
||||
self.current_step = step
|
||||
@@ -454,50 +729,20 @@ class AgentOrchestrator:
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
change_type = "UPDATE" if target.exists() else "CREATE"
|
||||
previous_content = target.read_text(encoding="utf-8") if target.exists() else ""
|
||||
if previous_content == content:
|
||||
return
|
||||
diff_text = self._build_diff_text(relative_path, previous_content, content)
|
||||
target.write_text(content, encoding="utf-8")
|
||||
self.changed_files.append(relative_path)
|
||||
if self.db_manager and self.history:
|
||||
self.db_manager.log_code_change(
|
||||
project_id=self.project_id,
|
||||
change_type=change_type,
|
||||
file_path=relative_path,
|
||||
actor="orchestrator",
|
||||
actor_type="agent",
|
||||
details=f"{change_type.title()}d generated artifact {relative_path}",
|
||||
history_id=self.history.id,
|
||||
prompt_id=self.prompt_audit.id if self.prompt_audit else None,
|
||||
diff_summary=f"Wrote {len(content.splitlines())} lines to {relative_path}",
|
||||
diff_text=diff_text,
|
||||
)
|
||||
|
||||
def _template_files(self) -> dict[str, str]:
|
||||
feature_section = "\n".join(f"- {feature}" for feature in self.features) or "- None specified"
|
||||
tech_section = "\n".join(f"- {tech}" for tech in self.tech_stack) or "- Python"
|
||||
return {
|
||||
".gitignore": "__pycache__/\n*.pyc\n.venv/\n.pytest_cache/\n.mypy_cache/\n",
|
||||
"README.md": (
|
||||
f"# {self.project_name}\n\n"
|
||||
f"{self.description}\n\n"
|
||||
"## Features\n"
|
||||
f"{feature_section}\n\n"
|
||||
"## Tech Stack\n"
|
||||
f"{tech_section}\n"
|
||||
),
|
||||
"requirements.txt": "fastapi\nuvicorn\npytest\n",
|
||||
"main.py": (
|
||||
"from fastapi import FastAPI\n\n"
|
||||
"app = FastAPI(title=\"Generated App\")\n\n"
|
||||
"@app.get('/')\n"
|
||||
"def read_root():\n"
|
||||
f" return {{'name': '{self.project_name}', 'status': 'generated', 'features': {self.features!r}}}\n"
|
||||
),
|
||||
"tests/test_app.py": (
|
||||
"from main import read_root\n\n"
|
||||
"def test_read_root():\n"
|
||||
f" assert read_root()['name'] == '{self.project_name}'\n"
|
||||
),
|
||||
self.pending_code_changes.append(
|
||||
{
|
||||
'change_type': change_type,
|
||||
'file_path': relative_path,
|
||||
'details': f"{change_type.title()}d generated artifact {relative_path}",
|
||||
'diff_summary': f"Wrote {len(content.splitlines())} lines to {relative_path}",
|
||||
'diff_text': diff_text,
|
||||
}
|
||||
)
|
||||
|
||||
async def run(self) -> dict:
|
||||
"""Run the software generation process with full audit logging."""
|
||||
@@ -588,18 +833,37 @@ class AgentOrchestrator:
|
||||
async def _create_project_structure(self) -> None:
|
||||
"""Create initial project structure."""
|
||||
self.project_root.mkdir(parents=True, exist_ok=True)
|
||||
for relative_path, content in self._template_files().items():
|
||||
if relative_path.startswith("main.py") or relative_path.startswith("tests/"):
|
||||
continue
|
||||
for relative_path, content in self._static_files().items():
|
||||
self._write_file(relative_path, content)
|
||||
self._append_log(f"Project structure created under {self.project_root}.")
|
||||
|
||||
async def _generate_code(self) -> None:
|
||||
"""Generate code using Ollama."""
|
||||
for relative_path, content in self._template_files().items():
|
||||
if relative_path in {"main.py", "tests/test_app.py"}:
|
||||
change_count_before = len(self.pending_code_changes)
|
||||
generated_files, trace, editing_existing_workspace = await self._generate_prompt_driven_files()
|
||||
for relative_path, content in generated_files.items():
|
||||
self._write_file(relative_path, content)
|
||||
self._append_log("Application entrypoint and smoke test generated.")
|
||||
if editing_existing_workspace and len(self.pending_code_changes) == change_count_before:
|
||||
raise RuntimeError('The LLM response did not produce any file changes for the existing project.')
|
||||
fallback_used = bool(trace and trace.get('fallback_used')) or trace is None
|
||||
if self.db_manager and self.history and self.prompt_audit and trace:
|
||||
self.db_manager.log_llm_trace(
|
||||
project_id=self.project_id,
|
||||
history_id=self.history.id,
|
||||
prompt_id=self.prompt_audit.id,
|
||||
stage='code_generation',
|
||||
provider=trace.get('provider', 'ollama'),
|
||||
model=trace.get('model', settings.OLLAMA_MODEL),
|
||||
system_prompt=trace.get('system_prompt', ''),
|
||||
user_prompt=trace.get('user_prompt', self.prompt_text or self.description),
|
||||
assistant_response=trace.get('assistant_response', ''),
|
||||
raw_response=trace.get('raw_response'),
|
||||
fallback_used=fallback_used,
|
||||
)
|
||||
if fallback_used:
|
||||
self._append_log('LLM code generation was unavailable; used deterministic scaffolding fallback.')
|
||||
else:
|
||||
self._append_log('Application files generated from the prompt with the configured LLM.')
|
||||
|
||||
async def _run_tests(self) -> None:
|
||||
"""Run tests for the generated code."""
|
||||
@@ -610,11 +874,25 @@ class AgentOrchestrator:
|
||||
async def _commit_to_git(self) -> None:
|
||||
"""Commit changes to git."""
|
||||
unique_files = list(dict.fromkeys(self.changed_files))
|
||||
git_debug = self.ui_manager.ui_data.setdefault('git', {})
|
||||
if not unique_files:
|
||||
git_debug.update({
|
||||
'commit_status': 'skipped',
|
||||
'early_exit_reason': 'changed_files_empty',
|
||||
'candidate_files': [],
|
||||
})
|
||||
self._append_log('Git commit skipped: no generated files were marked as changed.')
|
||||
self._log_system_debug('git', 'Commit exited early because changed_files was empty.')
|
||||
return
|
||||
if not self.git_manager.is_git_available():
|
||||
self.ui_manager.ui_data.setdefault('git', {})['error'] = 'git executable is not available in PATH'
|
||||
git_debug.update({
|
||||
'commit_status': 'error',
|
||||
'early_exit_reason': 'git_unavailable',
|
||||
'candidate_files': unique_files,
|
||||
'error': 'git executable is not available in PATH',
|
||||
})
|
||||
self._append_log('Git commit skipped: git executable is not available in PATH')
|
||||
self._log_system_debug('git', 'Commit exited early because git is unavailable.', level='ERROR')
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -622,7 +900,23 @@ class AgentOrchestrator:
|
||||
self.git_manager.init_repo()
|
||||
base_commit = self.git_manager.current_head_or_none()
|
||||
self.git_manager.add_files(unique_files)
|
||||
if not self.git_manager.get_status():
|
||||
status_after_add = self.git_manager.get_status()
|
||||
if not status_after_add:
|
||||
git_debug.update({
|
||||
'commit_status': 'skipped',
|
||||
'early_exit_reason': 'clean_after_staging',
|
||||
'candidate_files': unique_files,
|
||||
'status_after_add': '',
|
||||
})
|
||||
self._append_log(
|
||||
'Git commit skipped: working tree was clean after staging candidate files '
|
||||
f'{unique_files}. No repository diff was created.'
|
||||
)
|
||||
self._log_system_debug(
|
||||
'git',
|
||||
'Commit exited early because git status was clean after staging '
|
||||
f'files={unique_files}',
|
||||
)
|
||||
return
|
||||
|
||||
commit_message = f"AI generation for prompt: {self.project_name}"
|
||||
@@ -635,11 +929,17 @@ class AgentOrchestrator:
|
||||
"scope": "local",
|
||||
"branch": self.branch_name,
|
||||
}
|
||||
git_debug.update({
|
||||
'commit_status': 'committed',
|
||||
'early_exit_reason': None,
|
||||
'candidate_files': unique_files,
|
||||
'status_after_add': status_after_add,
|
||||
})
|
||||
remote_record = None
|
||||
try:
|
||||
remote_record = await self._push_remote_commit(commit_hash, commit_message, unique_files, base_commit)
|
||||
except (RuntimeError, subprocess.CalledProcessError, FileNotFoundError) as remote_exc:
|
||||
self.ui_manager.ui_data.setdefault("git", {})["remote_error"] = str(remote_exc)
|
||||
git_debug["remote_error"] = str(remote_exc)
|
||||
self._append_log(f"Remote git push skipped: {remote_exc}")
|
||||
|
||||
if remote_record:
|
||||
@@ -649,8 +949,8 @@ class AgentOrchestrator:
|
||||
if remote_record.get('pull_request'):
|
||||
commit_record['pull_request'] = remote_record['pull_request']
|
||||
self.ui_manager.ui_data['pull_request'] = remote_record['pull_request']
|
||||
self.ui_manager.ui_data.setdefault("git", {})["latest_commit"] = commit_record
|
||||
self.ui_manager.ui_data.setdefault("git", {})["commits"] = [commit_record]
|
||||
git_debug["latest_commit"] = commit_record
|
||||
git_debug["commits"] = [commit_record]
|
||||
self._append_log(f"Recorded git commit {commit_hash[:12]} for generated files.")
|
||||
if self.db_manager:
|
||||
self.db_manager.log_commit(
|
||||
@@ -668,6 +968,23 @@ class AgentOrchestrator:
|
||||
remote_status=remote_record.get("status") if remote_record else "local-only",
|
||||
related_issue=self.related_issue,
|
||||
)
|
||||
for change in self.pending_code_changes:
|
||||
self.db_manager.log_code_change(
|
||||
project_id=self.project_id,
|
||||
change_type=change['change_type'],
|
||||
file_path=change['file_path'],
|
||||
actor='orchestrator',
|
||||
actor_type='agent',
|
||||
details=change['details'],
|
||||
history_id=self.history.id if self.history else None,
|
||||
prompt_id=self.prompt_audit.id if self.prompt_audit else None,
|
||||
diff_summary=change.get('diff_summary'),
|
||||
diff_text=change.get('diff_text'),
|
||||
commit_hash=commit_hash,
|
||||
remote_status=remote_record.get('status') if remote_record else 'local-only',
|
||||
branch=self.branch_name,
|
||||
)
|
||||
self.pending_code_changes.clear()
|
||||
if self.related_issue:
|
||||
self.db_manager.log_issue_work(
|
||||
project_id=self.project_id,
|
||||
@@ -679,7 +996,12 @@ class AgentOrchestrator:
|
||||
commit_url=remote_record.get('commit_url') if remote_record else None,
|
||||
)
|
||||
except (RuntimeError, subprocess.CalledProcessError, FileNotFoundError) as exc:
|
||||
self.ui_manager.ui_data.setdefault("git", {})["error"] = str(exc)
|
||||
git_debug.update({
|
||||
'commit_status': 'error',
|
||||
'early_exit_reason': 'commit_exception',
|
||||
'candidate_files': unique_files,
|
||||
'error': str(exc),
|
||||
})
|
||||
self._append_log(f"Git commit skipped: {exc}")
|
||||
|
||||
async def _create_pr(self) -> None:
|
||||
|
||||
@@ -18,6 +18,25 @@ except ImportError:
|
||||
class RequestInterpreter:
|
||||
"""Use Ollama to turn free-form text into a structured software request."""
|
||||
|
||||
REQUEST_PREFIX_WORDS = {
|
||||
'a', 'an', 'app', 'application', 'build', 'create', 'dashboard', 'develop', 'design', 'for', 'generate',
|
||||
'internal', 'make', 'me', 'modern', 'need', 'new', 'our', 'platform', 'please', 'project', 'service',
|
||||
'simple', 'site', 'start', 'system', 'the', 'tool', 'us', 'want', 'web', 'website', 'with',
|
||||
}
|
||||
|
||||
REPO_NOISE_WORDS = REQUEST_PREFIX_WORDS | {'and', 'from', 'into', 'on', 'that', 'this', 'to'}
|
||||
GENERIC_PROJECT_NAME_WORDS = {
|
||||
'app', 'application', 'harness', 'platform', 'project', 'purpose', 'service', 'solution', 'suite', 'system', 'test', 'tool',
|
||||
}
|
||||
PLACEHOLDER_PROJECT_NAME_WORDS = {
|
||||
'generated project', 'new project', 'project', 'temporary name', 'temp name', 'placeholder', 'untitled project',
|
||||
}
|
||||
ROUTING_STOPWORDS = REPO_NOISE_WORDS | GENERIC_PROJECT_NAME_WORDS | {
|
||||
'about', 'after', 'again', 'appropriate', 'before', 'best', 'details', 'follow', 'following', 'implement',
|
||||
'integration', 'instance', 'instances', 'later', 'make', 'now', 'primary', 'primarily', 'probably',
|
||||
'remember', 'specific', 'suite', 'tearing', 'testing', 'through', 'used', 'using', 'workflow', 'workflows',
|
||||
}
|
||||
|
||||
def __init__(self, ollama_url: str | None = None, model: str | None = None):
|
||||
self.ollama_url = (ollama_url or settings.ollama_url).rstrip('/')
|
||||
self.model = model or settings.OLLAMA_MODEL
|
||||
@@ -82,6 +101,8 @@ class RequestInterpreter:
|
||||
parsed = json.loads(content)
|
||||
interpreted = self._normalize_interpreted_request(parsed, normalized)
|
||||
routing = self._normalize_routing(parsed.get('routing'), interpreted, compact_context)
|
||||
if routing.get('intent') == 'continue_project' and routing.get('project_name'):
|
||||
interpreted['name'] = routing['project_name']
|
||||
naming_trace = None
|
||||
if routing.get('intent') == 'new_project':
|
||||
interpreted, routing, naming_trace = await self._refine_new_project_identity(
|
||||
@@ -145,10 +166,11 @@ class RequestInterpreter:
|
||||
)
|
||||
if content:
|
||||
try:
|
||||
fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
|
||||
parsed = json.loads(content)
|
||||
project_name, repo_name = self._normalize_project_identity(
|
||||
parsed,
|
||||
fallback_name=interpreted.get('name') or self._derive_name(prompt_text),
|
||||
fallback_name=fallback_name,
|
||||
)
|
||||
repo_name = self._ensure_unique_repo_name(repo_name, constraints['repo_names'])
|
||||
interpreted['name'] = project_name
|
||||
@@ -158,7 +180,7 @@ class RequestInterpreter:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
fallback_name = interpreted.get('name') or self._derive_name(prompt_text)
|
||||
fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
|
||||
routing['project_name'] = fallback_name
|
||||
routing['repo_name'] = self._ensure_unique_repo_name(self._derive_repo_name(fallback_name), constraints['repo_names'])
|
||||
return interpreted, routing, trace
|
||||
@@ -250,6 +272,14 @@ class RequestInterpreter:
|
||||
matched_project = project
|
||||
break
|
||||
intent = str(routing.get('intent') or '').strip() or ('continue_project' if matched_project else 'new_project')
|
||||
if matched_project is None and intent == 'continue_project':
|
||||
recent_chat_history = context.get('recent_chat_history', [])
|
||||
recent_project_id = recent_chat_history[0].get('project_id') if recent_chat_history else None
|
||||
if recent_project_id:
|
||||
matched_project = next(
|
||||
(project for project in context.get('projects', []) if project.get('project_id') == recent_project_id),
|
||||
None,
|
||||
)
|
||||
normalized = {
|
||||
'intent': intent,
|
||||
'project_id': matched_project.get('project_id') if matched_project else project_id,
|
||||
@@ -280,13 +310,22 @@ class RequestInterpreter:
|
||||
noun_phrase = re.search(
|
||||
r'(?:build|create|start|make|develop|generate|design|need|want)\s+'
|
||||
r'(?:me\s+|us\s+|an?\s+|the\s+|new\s+|internal\s+|simple\s+|lightweight\s+|modern\s+|web\s+|mobile\s+)*'
|
||||
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager))\b',
|
||||
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
|
||||
first_line,
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
if noun_phrase:
|
||||
return self._humanize_name(noun_phrase.group(1))
|
||||
|
||||
focused_phrase = re.search(
|
||||
r'(?:purpose\s+is\s+to\s+create\s+(?:an?\s+)?)'
|
||||
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
|
||||
first_line,
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
if focused_phrase:
|
||||
return self._humanize_name(focused_phrase.group(1))
|
||||
|
||||
cleaned = re.sub(r'[^A-Za-z0-9 ]+', ' ', first_line)
|
||||
stopwords = {
|
||||
'build', 'create', 'start', 'make', 'develop', 'generate', 'design', 'need', 'want', 'please', 'for', 'our', 'with', 'that', 'this',
|
||||
@@ -301,6 +340,7 @@ class RequestInterpreter:
|
||||
"""Normalize a candidate project name into a readable title."""
|
||||
cleaned = re.sub(r'[^A-Za-z0-9\s-]+', ' ', raw_name).strip(' -')
|
||||
cleaned = re.sub(r'\s+', ' ', cleaned)
|
||||
cleaned = self._trim_request_prefix(cleaned)
|
||||
special_upper = {'api', 'crm', 'erp', 'cms', 'hr', 'it', 'ui', 'qa'}
|
||||
words = []
|
||||
for word in cleaned.split()[:6]:
|
||||
@@ -308,14 +348,79 @@ class RequestInterpreter:
|
||||
words.append(lowered.upper() if lowered in special_upper else lowered.capitalize())
|
||||
return ' '.join(words) or 'Generated Project'
|
||||
|
||||
def _trim_request_prefix(self, candidate: str) -> str:
|
||||
"""Remove leading request phrasing from model-produced names and slugs."""
|
||||
tokens = [token for token in re.split(r'[-\s]+', candidate or '') if token]
|
||||
while tokens and tokens[0].lower() in self.REQUEST_PREFIX_WORDS:
|
||||
tokens.pop(0)
|
||||
trimmed = ' '.join(tokens).strip()
|
||||
return trimmed or candidate.strip()
|
||||
|
||||
def _derive_repo_name(self, project_name: str) -> str:
|
||||
"""Derive a repository slug from a human-readable project name."""
|
||||
preferred = (project_name or 'project').strip().lower().replace(' ', '-')
|
||||
preferred_name = self._trim_request_prefix((project_name or 'project').strip())
|
||||
preferred = preferred_name.lower().replace(' ', '-')
|
||||
sanitized = ''.join(ch if ch.isalnum() or ch in {'-', '_'} else '-' for ch in preferred)
|
||||
while '--' in sanitized:
|
||||
sanitized = sanitized.replace('--', '-')
|
||||
return sanitized.strip('-') or 'project'
|
||||
|
||||
def _should_use_repo_name_candidate(self, candidate: str, project_name: str) -> bool:
|
||||
"""Return whether a model-proposed repo slug is concise enough to trust directly."""
|
||||
cleaned = self._trim_request_prefix(re.sub(r'[^A-Za-z0-9\s_-]+', ' ', candidate or '').strip())
|
||||
if not cleaned:
|
||||
return False
|
||||
candidate_tokens = [token.lower() for token in re.split(r'[-\s_]+', cleaned) if token]
|
||||
if not candidate_tokens:
|
||||
return False
|
||||
if len(candidate_tokens) > 6:
|
||||
return False
|
||||
noise_count = sum(1 for token in candidate_tokens if token in self.REPO_NOISE_WORDS)
|
||||
if noise_count >= 2:
|
||||
return False
|
||||
if len('-'.join(candidate_tokens)) > 40:
|
||||
return False
|
||||
project_tokens = {
|
||||
token.lower()
|
||||
for token in re.split(r'[-\s_]+', project_name or '')
|
||||
if token and token.lower() not in self.REPO_NOISE_WORDS
|
||||
}
|
||||
if project_tokens:
|
||||
overlap = sum(1 for token in candidate_tokens if token in project_tokens)
|
||||
if overlap == 0:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _should_use_project_name_candidate(self, candidate: str, fallback_name: str) -> bool:
|
||||
"""Return whether a model-proposed project title is concrete enough to trust."""
|
||||
cleaned = self._trim_request_prefix(re.sub(r'[^A-Za-z0-9\s-]+', ' ', candidate or '').strip())
|
||||
if not cleaned:
|
||||
return False
|
||||
candidate_tokens = [token.lower() for token in re.split(r'[-\s]+', cleaned) if token]
|
||||
if not candidate_tokens:
|
||||
return False
|
||||
if len(candidate_tokens) == 1 and candidate_tokens[0] in self.GENERIC_PROJECT_NAME_WORDS:
|
||||
return False
|
||||
if all(token in self.GENERIC_PROJECT_NAME_WORDS for token in candidate_tokens):
|
||||
return False
|
||||
fallback_tokens = {
|
||||
token.lower() for token in re.split(r'[-\s]+', fallback_name or '') if token and token.lower() not in self.REPO_NOISE_WORDS
|
||||
}
|
||||
if fallback_tokens and len(candidate_tokens) <= 2:
|
||||
overlap = sum(1 for token in candidate_tokens if token in fallback_tokens)
|
||||
if overlap == 0 and any(token in self.GENERIC_PROJECT_NAME_WORDS for token in candidate_tokens):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _preferred_project_name_fallback(self, prompt_text: str, interpreted_name: str | None) -> str:
|
||||
"""Pick the best fallback title when the earlier interpretation produced a placeholder."""
|
||||
interpreted_clean = self._humanize_name(str(interpreted_name or '').strip()) if interpreted_name else ''
|
||||
normalized_interpreted = interpreted_clean.lower()
|
||||
if normalized_interpreted and normalized_interpreted not in self.PLACEHOLDER_PROJECT_NAME_WORDS:
|
||||
if not (len(normalized_interpreted.split()) == 1 and normalized_interpreted in self.GENERIC_PROJECT_NAME_WORDS):
|
||||
return interpreted_clean
|
||||
return self._derive_name(prompt_text)
|
||||
|
||||
def _ensure_unique_repo_name(self, repo_name: str, reserved_names: set[str]) -> str:
|
||||
"""Choose a repository slug that does not collide with tracked or remote repositories."""
|
||||
base_name = self._derive_repo_name(repo_name)
|
||||
@@ -328,8 +433,15 @@ class RequestInterpreter:
|
||||
|
||||
def _normalize_project_identity(self, payload: dict, fallback_name: str) -> tuple[str, str]:
|
||||
"""Normalize model-proposed project and repository naming."""
|
||||
project_name = self._humanize_name(str(payload.get('project_name') or payload.get('name') or fallback_name))
|
||||
repo_name = self._derive_repo_name(str(payload.get('repo_name') or project_name))
|
||||
fallback_project_name = self._humanize_name(str(fallback_name or 'Generated Project'))
|
||||
project_candidate = str(payload.get('project_name') or payload.get('name') or '').strip()
|
||||
project_name = fallback_project_name
|
||||
if project_candidate and self._should_use_project_name_candidate(project_candidate, fallback_project_name):
|
||||
project_name = self._humanize_name(project_candidate)
|
||||
repo_candidate = str(payload.get('repo_name') or '').strip()
|
||||
repo_name = self._derive_repo_name(project_name)
|
||||
if repo_candidate and self._should_use_repo_name_candidate(repo_candidate, project_name):
|
||||
repo_name = self._derive_repo_name(repo_candidate)
|
||||
return project_name, repo_name
|
||||
|
||||
def _heuristic_fallback(self, prompt_text: str, context: dict | None = None) -> tuple[dict, dict]:
|
||||
@@ -361,6 +473,7 @@ class RequestInterpreter:
|
||||
projects = context.get('projects', [])
|
||||
last_project_id = recent_history[0].get('project_id') if recent_history else None
|
||||
last_issue = ((recent_history[0].get('related_issue') or {}).get('number') if recent_history else None)
|
||||
last_project = next((project for project in projects if project.get('project_id') == last_project_id), None) if last_project_id else None
|
||||
|
||||
matched_project = None
|
||||
for project in projects:
|
||||
@@ -374,8 +487,24 @@ class RequestInterpreter:
|
||||
break
|
||||
if matched_project is None and not explicit_new:
|
||||
follow_up_tokens = ['also', 'continue', 'for this project', 'for that project', 'work on this', 'work on that', 'fix that', 'add this']
|
||||
if any(token in lowered for token in follow_up_tokens) and last_project_id:
|
||||
matched_project = next((project for project in projects if project.get('project_id') == last_project_id), None)
|
||||
leading_follow_up = lowered.startswith(('also', 'now', 'continue', 'remember', 'then'))
|
||||
recent_overlap = 0
|
||||
if last_project is not None:
|
||||
recent_prompt_text = recent_history[0].get('prompt_text') or ''
|
||||
project_reference_text = ' '.join(
|
||||
part for part in [
|
||||
last_project.get('name') or '',
|
||||
last_project.get('description') or '',
|
||||
((last_project.get('repository') or {}).get('name') or ''),
|
||||
]
|
||||
if part
|
||||
)
|
||||
recent_overlap = len(
|
||||
self._routing_tokens(prompt_text)
|
||||
& (self._routing_tokens(recent_prompt_text) | self._routing_tokens(project_reference_text))
|
||||
)
|
||||
if last_project_id and (leading_follow_up or any(token in lowered for token in follow_up_tokens) or recent_overlap >= 2):
|
||||
matched_project = last_project
|
||||
issue_number = referenced_issue
|
||||
if issue_number is None and any(token in lowered for token in ['that issue', 'this issue', 'the issue']) and last_issue is not None:
|
||||
issue_number = last_issue
|
||||
@@ -390,6 +519,14 @@ class RequestInterpreter:
|
||||
'reasoning_summary': 'Heuristic routing from chat history and project names.',
|
||||
}
|
||||
|
||||
def _routing_tokens(self, text: str) -> set[str]:
|
||||
"""Extract meaningful tokens for heuristic continuation matching."""
|
||||
cleaned = re.sub(r'[^a-z0-9]+', ' ', (text or '').lower())
|
||||
return {
|
||||
token for token in cleaned.split()
|
||||
if len(token) >= 4 and token not in self.ROUTING_STOPWORDS
|
||||
}
|
||||
|
||||
def _extract_issue_number(self, prompt_text: str) -> int | None:
|
||||
match = re.search(r'(?:#|issue\s+)(\d+)', prompt_text, flags=re.IGNORECASE)
|
||||
return int(match.group(1)) if match else None
|
||||
@@ -60,6 +60,63 @@ EDITABLE_LLM_PROMPTS: dict[str, dict[str, str]] = {
|
||||
},
|
||||
}
|
||||
|
||||
def _runtime_setting_spec(label: str, category: str, description: str, value_type: str) -> dict[str, str]:
    """Build one editable runtime-setting descriptor."""
    return {
        'label': label,
        'category': category,
        'description': description,
        'value_type': value_type,
    }


# Runtime settings that may be overridden from the admin UI / database.
EDITABLE_RUNTIME_SETTINGS: dict[str, dict[str, str]] = {
    # Home Assistant energy-gate inputs.
    'HOME_ASSISTANT_BATTERY_ENTITY_ID': _runtime_setting_spec(
        'Battery Entity ID',
        'home_assistant',
        'Home Assistant entity used for battery state-of-charge gating.',
        'string',
    ),
    'HOME_ASSISTANT_SURPLUS_ENTITY_ID': _runtime_setting_spec(
        'Surplus Power Entity ID',
        'home_assistant',
        'Home Assistant entity used for export or surplus power gating.',
        'string',
    ),
    'HOME_ASSISTANT_BATTERY_FULL_THRESHOLD': _runtime_setting_spec(
        'Battery Full Threshold',
        'home_assistant',
        'Minimum battery percentage required before queued prompts may run.',
        'float',
    ),
    'HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS': _runtime_setting_spec(
        'Surplus Threshold Watts',
        'home_assistant',
        'Minimum surplus/export power required before queued prompts may run.',
        'float',
    ),
    # Telegram prompt-queue behavior.
    'PROMPT_QUEUE_ENABLED': _runtime_setting_spec(
        'Queue Telegram Prompts',
        'prompt_queue',
        'When enabled, Telegram prompts are queued and gated instead of processed immediately.',
        'boolean',
    ),
    'PROMPT_QUEUE_AUTO_PROCESS': _runtime_setting_spec(
        'Auto Process Queue',
        'prompt_queue',
        'Let the background worker drain the queue automatically when the gate is open.',
        'boolean',
    ),
    'PROMPT_QUEUE_FORCE_PROCESS': _runtime_setting_spec(
        'Force Queue Processing',
        'prompt_queue',
        'Bypass the Home Assistant energy gate for queued prompts.',
        'boolean',
    ),
    'PROMPT_QUEUE_POLL_INTERVAL_SECONDS': _runtime_setting_spec(
        'Queue Poll Interval Seconds',
        'prompt_queue',
        'Polling interval for the background queue worker.',
        'integer',
    ),
    'PROMPT_QUEUE_MAX_BATCH_SIZE': _runtime_setting_spec(
        'Queue Max Batch Size',
        'prompt_queue',
        'Maximum number of queued prompts processed in one batch.',
        'integer',
    ),
}
|
||||
|
||||
|
||||
def _get_persisted_llm_prompt_override(env_key: str) -> str | None:
|
||||
"""Load one persisted LLM prompt override from the database when available."""
|
||||
@@ -92,6 +149,62 @@ def _resolve_llm_prompt_value(env_key: str, fallback: str) -> str:
|
||||
return (fallback or '').strip()
|
||||
|
||||
|
||||
def _get_persisted_runtime_setting_override(key: str):
    """Load one persisted runtime-setting override from the database when available.

    Returns None for keys that are not registered as editable, when no
    database session can be created, or when any lookup step fails —
    persistence is best-effort by design.
    """
    if key not in EDITABLE_RUNTIME_SETTINGS:
        return None
    try:
        # Support both package-relative and flat-module execution layouts.
        try:
            from .database import get_db_sync
            from .agents.database_manager import DatabaseManager
        except ImportError:
            from database import get_db_sync
            from agents.database_manager import DatabaseManager

        session = get_db_sync()
        if session is None:
            return None
        try:
            return DatabaseManager(session).get_runtime_setting_override(key)
        finally:
            session.close()
    except Exception:
        # Any failure falls back to environment defaults instead of crashing.
        return None
|
||||
|
||||
|
||||
def _coerce_runtime_setting_value(key: str, value, fallback):
|
||||
"""Coerce a persisted runtime setting override into the expected scalar type."""
|
||||
value_type = EDITABLE_RUNTIME_SETTINGS.get(key, {}).get('value_type')
|
||||
if value is None:
|
||||
return fallback
|
||||
if value_type == 'boolean':
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
normalized = str(value).strip().lower()
|
||||
if normalized in {'1', 'true', 'yes', 'on'}:
|
||||
return True
|
||||
if normalized in {'0', 'false', 'no', 'off'}:
|
||||
return False
|
||||
return bool(fallback)
|
||||
if value_type == 'integer':
|
||||
try:
|
||||
return int(value)
|
||||
except Exception:
|
||||
return int(fallback)
|
||||
if value_type == 'float':
|
||||
try:
|
||||
return float(value)
|
||||
except Exception:
|
||||
return float(fallback)
|
||||
return str(value).strip()
|
||||
|
||||
|
||||
def _resolve_runtime_setting_value(key: str, fallback):
|
||||
"""Resolve one editable runtime setting from DB override first, then environment/defaults."""
|
||||
override = _get_persisted_runtime_setting_override(key)
|
||||
return _coerce_runtime_setting_value(key, override, fallback)
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
"""Application settings loaded from environment variables."""
|
||||
|
||||
@@ -120,10 +233,10 @@ class Settings(BaseSettings):
|
||||
"For summaries: only describe facts present in the provided context and tool outputs. Never claim a repository, commit, or pull request exists unless it is present in the supplied data."
|
||||
)
|
||||
LLM_PROJECT_NAMING_GUARDRAIL_PROMPT: str = (
|
||||
"For project naming: prefer clear, product-like names and repository slugs that match the user's intent. Avoid reusing tracked project identities unless the request is clearly asking for an existing project."
|
||||
"For project naming: prefer clear, product-like names and repository slugs that match the user's concrete deliverable. Avoid abstract or instructional words such as purpose, project, system, app, tool, platform, solution, new, create, or test unless the request truly centers on that exact noun. Base the name on the actual artifact or workflow being built, and avoid copying sentence fragments from the prompt. Avoid reusing tracked project identities unless the request is clearly asking for an existing project."
|
||||
)
|
||||
LLM_PROJECT_NAMING_SYSTEM_PROMPT: str = (
|
||||
"You name newly requested software projects. Return only JSON with keys project_name, repo_name, and rationale. Project names should be concise human-readable titles. Repo names should be lowercase kebab-case slugs suitable for a Gitea repository name."
|
||||
"You name newly requested software projects. Return only JSON with keys project_name, repo_name, and rationale. Project names should be concise human-readable titles based on the real product, artifact, or workflow being created. Repo names should be lowercase kebab-case slugs derived from that title. Never return generic names like purpose, project, system, app, tool, platform, solution, harness, or test by themselves, and never return a repo_name that is a copied sentence fragment from the prompt. Prefer 2 to 4 specific words when possible."
|
||||
)
|
||||
LLM_PROJECT_ID_GUARDRAIL_PROMPT: str = (
|
||||
"For project ids: produce short stable slugs for newly created projects. Avoid collisions with known project ids and keep ids lowercase with hyphens."
|
||||
@@ -309,6 +422,26 @@ class Settings(BaseSettings):
|
||||
)
|
||||
return prompts
|
||||
|
||||
@property
def editable_runtime_settings(self) -> list[dict]:
    """Return metadata for all DB-editable runtime settings.

    Each entry pairs the static registry metadata with the environment
    default (the attribute value on this Settings instance) and the
    currently effective value (DB override when one exists).
    """
    resolved: list[dict] = []
    for setting_key, meta in EDITABLE_RUNTIME_SETTINGS.items():
        env_default = getattr(self, setting_key)
        resolved.append(
            {
                'key': setting_key,
                'label': meta['label'],
                'category': meta['category'],
                'description': meta['description'],
                'value_type': meta['value_type'],
                'default_value': env_default,
                'value': _resolve_runtime_setting_value(setting_key, env_default),
            }
        )
    return resolved
|
||||
|
||||
@property
|
||||
def llm_tool_allowlist(self) -> list[str]:
|
||||
"""Get the allowed LLM tool names as a normalized list."""
|
||||
@@ -438,47 +571,47 @@ class Settings(BaseSettings):
|
||||
@property
|
||||
def home_assistant_battery_entity_id(self) -> str:
|
||||
"""Get the Home Assistant battery state entity id."""
|
||||
return self.HOME_ASSISTANT_BATTERY_ENTITY_ID.strip()
|
||||
return str(_resolve_runtime_setting_value('HOME_ASSISTANT_BATTERY_ENTITY_ID', self.HOME_ASSISTANT_BATTERY_ENTITY_ID)).strip()
|
||||
|
||||
@property
|
||||
def home_assistant_surplus_entity_id(self) -> str:
|
||||
"""Get the Home Assistant surplus power entity id."""
|
||||
return self.HOME_ASSISTANT_SURPLUS_ENTITY_ID.strip()
|
||||
return str(_resolve_runtime_setting_value('HOME_ASSISTANT_SURPLUS_ENTITY_ID', self.HOME_ASSISTANT_SURPLUS_ENTITY_ID)).strip()
|
||||
|
||||
@property
|
||||
def home_assistant_battery_full_threshold(self) -> float:
|
||||
"""Get the minimum battery SoC percentage for queue processing."""
|
||||
return float(self.HOME_ASSISTANT_BATTERY_FULL_THRESHOLD)
|
||||
return float(_resolve_runtime_setting_value('HOME_ASSISTANT_BATTERY_FULL_THRESHOLD', self.HOME_ASSISTANT_BATTERY_FULL_THRESHOLD))
|
||||
|
||||
@property
|
||||
def home_assistant_surplus_threshold_watts(self) -> float:
|
||||
"""Get the minimum export/surplus power threshold for queue processing."""
|
||||
return float(self.HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS)
|
||||
return float(_resolve_runtime_setting_value('HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS', self.HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS))
|
||||
|
||||
@property
|
||||
def prompt_queue_enabled(self) -> bool:
|
||||
"""Whether Telegram prompts should be queued instead of processed immediately."""
|
||||
return bool(self.PROMPT_QUEUE_ENABLED)
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_ENABLED', self.PROMPT_QUEUE_ENABLED))
|
||||
|
||||
@property
|
||||
def prompt_queue_auto_process(self) -> bool:
|
||||
"""Whether the background worker should automatically process queued prompts."""
|
||||
return bool(self.PROMPT_QUEUE_AUTO_PROCESS)
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_AUTO_PROCESS', self.PROMPT_QUEUE_AUTO_PROCESS))
|
||||
|
||||
@property
|
||||
def prompt_queue_force_process(self) -> bool:
|
||||
"""Whether queued prompts should bypass the Home Assistant energy gate."""
|
||||
return bool(self.PROMPT_QUEUE_FORCE_PROCESS)
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_FORCE_PROCESS', self.PROMPT_QUEUE_FORCE_PROCESS))
|
||||
|
||||
@property
|
||||
def prompt_queue_poll_interval_seconds(self) -> int:
|
||||
"""Get the queue polling interval for background processing."""
|
||||
return max(int(self.PROMPT_QUEUE_POLL_INTERVAL_SECONDS), 5)
|
||||
return max(int(_resolve_runtime_setting_value('PROMPT_QUEUE_POLL_INTERVAL_SECONDS', self.PROMPT_QUEUE_POLL_INTERVAL_SECONDS)), 5)
|
||||
|
||||
@property
|
||||
def prompt_queue_max_batch_size(self) -> int:
|
||||
"""Get the maximum number of queued prompts to process in one batch."""
|
||||
return max(int(self.PROMPT_QUEUE_MAX_BATCH_SIZE), 1)
|
||||
return max(int(_resolve_runtime_setting_value('PROMPT_QUEUE_MAX_BATCH_SIZE', self.PROMPT_QUEUE_MAX_BATCH_SIZE)), 1)
|
||||
|
||||
@property
|
||||
def projects_root(self) -> Path:
|
||||
|
||||
@@ -214,6 +214,70 @@ def _render_commit_list(commits: list[dict]) -> None:
|
||||
ui.link('Open compare view', compare_url, new_tab=True)
|
||||
|
||||
|
||||
def _render_generation_diagnostics(ui_data: dict | None) -> None:
    """Render generation and git diagnostics from the latest UI snapshot.

    Expects the project's persisted UI snapshot dict; both the
    'generation_debug' and 'git' sub-dicts are optional and are validated
    defensively so a malformed snapshot never breaks the dashboard.
    """
    snapshot = ui_data if isinstance(ui_data, dict) else {}
    # Treat non-dict sub-sections as absent rather than crashing.
    generation_debug = snapshot.get('generation_debug') if isinstance(snapshot.get('generation_debug'), dict) else {}
    git_debug = snapshot.get('git') if isinstance(snapshot.get('git'), dict) else {}

    if not generation_debug and not git_debug:
        ui.label('No generation diagnostics captured yet.').classes('factory-muted')
        return

    def _render_path_row(label: str, values: list[str]) -> None:
        # One-line "Label: a, b, c" row; empty lists render as 'none'.
        text = ', '.join(values) if values else 'none'
        ui.label(f'{label}: {text}').classes('factory-muted' if values else 'factory-code')

    with ui.column().classes('gap-3 w-full'):
        if generation_debug:
            with ui.column().classes('gap-1'):
                ui.label('Generation filtering').style('font-weight: 700; color: #2f241d;')
                ui.label(
                    'Existing workspace: '
                    + ('yes' if generation_debug.get('existing_workspace') else 'no')
                ).classes('factory-muted')
                _render_path_row('Raw paths', generation_debug.get('raw_paths') or [])
                _render_path_row('Accepted paths', generation_debug.get('accepted_paths') or [])
                _render_path_row('Rejected paths', generation_debug.get('rejected_paths') or [])
        if git_debug:
            with ui.column().classes('gap-1'):
                ui.label('Git outcome').style('font-weight: 700; color: #2f241d;')
                if git_debug.get('commit_status'):
                    with ui.row().classes('items-center gap-2'):
                        ui.label(git_debug['commit_status']).classes('factory-chip')
                        # NOTE(review): early_exit_reason chip assumed to sit in the
                        # same row as commit_status — original indentation was lost
                        # in transit; confirm against upstream.
                        if git_debug.get('early_exit_reason'):
                            ui.label(git_debug['early_exit_reason']).classes('factory-chip')
                # 'is not None' distinguishes "no data captured" from an empty list.
                if git_debug.get('candidate_files') is not None:
                    _render_path_row('Candidate files', git_debug.get('candidate_files') or [])
                latest_commit = git_debug.get('latest_commit') if isinstance(git_debug.get('latest_commit'), dict) else {}
                if latest_commit:
                    ui.label(
                        f"Latest commit: {(latest_commit.get('hash') or 'unknown')[:12]} · {latest_commit.get('scope') or 'local'}"
                    ).classes('factory-muted')
                if git_debug.get('status_after_add'):
                    with ui.expansion('Git status after staging').classes('w-full q-mt-sm'):
                        ui.label(str(git_debug['status_after_add'])).classes('factory-code')
                if git_debug.get('remote_error'):
                    ui.label(f"Remote push error: {git_debug['remote_error']}").classes('factory-code')
                if git_debug.get('error'):
                    ui.label(f"Git error: {git_debug['error']}").classes('factory-code')
                pull_request_debug = git_debug.get('pull_request_debug') if isinstance(git_debug.get('pull_request_debug'), dict) else {}
                if pull_request_debug:
                    ui.label('Pull request creation').style('font-weight: 700; color: #2f241d;')
                    if pull_request_debug.get('status'):
                        ui.label(str(pull_request_debug['status'])).classes('factory-chip')
                    if pull_request_debug.get('request'):
                        with ui.expansion('PR request payload').classes('w-full q-mt-sm'):
                            ui.label(json.dumps(pull_request_debug['request'], indent=2, sort_keys=True)).classes('factory-code')
                    if pull_request_debug.get('response'):
                        with ui.expansion('PR API response').classes('w-full q-mt-sm'):
                            ui.label(json.dumps(pull_request_debug['response'], indent=2, sort_keys=True)).classes('factory-code')
                    if pull_request_debug.get('resolved'):
                        resolved = pull_request_debug['resolved']
                        if resolved.get('pr_url'):
                            ui.link('Open pull request', resolved['pr_url'], new_tab=True).classes('factory-code')
|
||||
|
||||
|
||||
def _render_timeline(events: list[dict]) -> None:
|
||||
"""Render a mixed project timeline."""
|
||||
if not events:
|
||||
@@ -665,6 +729,24 @@ def _add_dashboard_styles() -> None:
|
||||
"""Register shared dashboard styles."""
|
||||
ui.add_head_html(
|
||||
"""
|
||||
<script>
|
||||
(() => {
|
||||
const scrollKey = 'factory-dashboard-scroll-y';
|
||||
const rememberScroll = () => sessionStorage.setItem(scrollKey, String(window.scrollY || 0));
|
||||
const restoreScroll = () => {
|
||||
const stored = sessionStorage.getItem(scrollKey);
|
||||
if (stored === null) return;
|
||||
window.requestAnimationFrame(() => window.scrollTo({top: Number(stored) || 0, left: 0, behavior: 'auto'}));
|
||||
};
|
||||
window.addEventListener('scroll', rememberScroll, {passive: true});
|
||||
document.addEventListener('click', rememberScroll, true);
|
||||
const observer = new MutationObserver(() => restoreScroll());
|
||||
window.addEventListener('load', () => {
|
||||
observer.observe(document.body, {childList: true, subtree: true});
|
||||
restoreScroll();
|
||||
});
|
||||
})();
|
||||
</script>
|
||||
<style>
|
||||
body { background: radial-gradient(circle at top, #f4efe7 0%, #e9e1d4 38%, #d7cec1 100%); }
|
||||
.factory-shell { max-width: 1240px; margin: 0 auto; }
|
||||
@@ -867,10 +949,31 @@ def create_dashboard():
|
||||
repo_discovery_key = 'dashboard.repo_discovery'
|
||||
repo_owner_key = 'dashboard.repo_owner'
|
||||
repo_name_key = 'dashboard.repo_name'
|
||||
expansion_state_prefix = 'dashboard.expansion.'
|
||||
|
||||
def _expansion_state_key(name: str) -> str:
    """Build the per-user storage key for one expansion panel's open state."""
    return f'{expansion_state_prefix}{name}'

def _expansion_value(name: str, default: bool = False) -> bool:
    """Read the persisted open/closed state for a named expansion panel."""
    return bool(app.storage.user.get(_expansion_state_key(name), default))

def _store_expansion_value(name: str, event) -> None:
    """Persist the new open/closed state when the user toggles a panel."""
    app.storage.user[_expansion_state_key(name)] = bool(event.value)

def _sticky_expansion(name: str, text: str, *, icon: str | None = None, default: bool = False, classes: str = 'w-full'):
    """Create a ui.expansion whose open state survives page refreshes.

    The state is keyed by `name` in per-user storage; toggling the panel
    writes the new value back immediately.
    """
    return ui.expansion(
        text,
        icon=icon,
        value=_expansion_value(name, default),
        # Bind the name as a default arg so each panel stores its own state.
        on_value_change=lambda event, expansion_name=name: _store_expansion_value(expansion_name, event),
    ).classes(classes)
|
||||
|
||||
def _llm_prompt_draft_key(prompt_key: str) -> str:
    """Build the per-user storage key for an unsaved LLM prompt draft."""
    return f'dashboard.llm_prompt_draft.{prompt_key}'

def _runtime_setting_draft_key(setting_key: str) -> str:
    """Build the per-user storage key for an unsaved runtime-setting draft."""
    return f'dashboard.runtime_setting_draft.{setting_key}'

def _selected_tab_name() -> str:
    """Return the persisted active dashboard tab (defaults to 'overview')."""
    return app.storage.user.get(active_tab_key, 'overview')
|
||||
@@ -940,6 +1043,15 @@ def create_dashboard():
|
||||
def _clear_prompt_draft(prompt_key: str) -> None:
    """Drop the unsaved LLM prompt draft for this key, if any."""
    app.storage.user.pop(_llm_prompt_draft_key(prompt_key), None)

def _runtime_setting_draft_value(setting_key: str, fallback):
    """Return the user's unsaved runtime-setting draft, or the fallback value."""
    return app.storage.user.get(_runtime_setting_draft_key(setting_key), fallback)

def _store_runtime_setting_draft(setting_key: str, value) -> None:
    """Remember an edited-but-unsaved runtime-setting value for this user."""
    app.storage.user[_runtime_setting_draft_key(setting_key)] = value

def _clear_runtime_setting_draft(setting_key: str) -> None:
    """Drop the unsaved runtime-setting draft for this key, if any."""
    app.storage.user.pop(_runtime_setting_draft_key(setting_key), None)
|
||||
|
||||
def _call_backend_json(path: str, method: str = 'GET', payload: dict | None = None) -> dict:
|
||||
target = f"{settings.backend_public_url}{path}"
|
||||
data = json.dumps(payload).encode('utf-8') if payload is not None else None
|
||||
@@ -1136,6 +1248,26 @@ def create_dashboard():
|
||||
ui.notify('Queued prompt returned to pending state', color='positive')
|
||||
_refresh_all_dashboard_sections()
|
||||
|
||||
def purge_orphan_code_changes_action(project_id: str | None = None) -> None:
    """Delete generated CODE_CHANGE audit rows that match no git commit.

    When `project_id` is None the cleanup runs across all projects;
    otherwise it is scoped to one project. Notifies the user and refreshes
    the dashboard either way.
    """
    db = get_db_sync()
    if db is None:
        ui.notify('Database session could not be created', color='negative')
        return
    with closing(db):
        result = DatabaseManager(db).cleanup_orphan_code_changes(project_id=project_id)
        ui.notify(result.get('message', 'Audit cleanup completed'), color='positive')
        # NOTE(review): refresh placed inside the session scope — original
        # indentation was lost in transit; behavior is equivalent either way.
        _refresh_all_dashboard_sections()
|
||||
|
||||
def retry_project_delivery_action(project_id: str) -> None:
    """Re-attempt publishing a project's generated changes to its repository.

    Surfaces the backend result message as a positive/negative toast and
    refreshes all dashboard sections afterwards.
    """
    db = get_db_sync()
    if db is None:
        ui.notify('Database session could not be created', color='negative')
        return
    with closing(db):
        result = DatabaseManager(db).retry_project_delivery(project_id)
        ui.notify(result.get('message', 'Delivery retry completed'), color='positive' if result.get('status') == 'success' else 'negative')
        # NOTE(review): refresh placed inside the session scope — original
        # indentation was lost in transit; behavior is equivalent either way.
        _refresh_all_dashboard_sections()
|
||||
|
||||
def save_llm_prompt_action(prompt_key: str) -> None:
|
||||
db = get_db_sync()
|
||||
if db is None:
|
||||
@@ -1166,6 +1298,36 @@ def create_dashboard():
|
||||
ui.notify('LLM prompt setting reset to environment default', color='positive')
|
||||
_refresh_system_sections()
|
||||
|
||||
def save_runtime_setting_action(setting_key: str) -> None:
    """Persist the user's draft value for one runtime setting to the database.

    Falls back to the currently stored value when no draft exists. On
    success the draft is cleared and the whole dashboard refreshes; on
    failure only an error toast is shown.
    """
    db = get_db_sync()
    if db is None:
        ui.notify('Database session could not be created', color='negative')
        return
    with closing(db):
        # Look up the current stored value so a missing draft saves a no-op.
        current = next((item for item in DatabaseManager(db).get_runtime_settings() if item['key'] == setting_key), None)
        value = _runtime_setting_draft_value(setting_key, current['value'] if current else None)
        result = DatabaseManager(db).save_runtime_setting(setting_key, value, actor='dashboard')
        if result.get('status') == 'error':
            ui.notify(result.get('message', 'Runtime setting save failed'), color='negative')
            return
        _clear_runtime_setting_draft(setting_key)
        ui.notify('Runtime setting saved', color='positive')
        _refresh_all_dashboard_sections()
|
||||
|
||||
def reset_runtime_setting_action(setting_key: str) -> None:
    """Remove the DB override for one runtime setting, restoring the env default.

    Also discards any unsaved draft so the UI reflects the restored value,
    then refreshes the whole dashboard.
    """
    db = get_db_sync()
    if db is None:
        ui.notify('Database session could not be created', color='negative')
        return
    with closing(db):
        result = DatabaseManager(db).reset_runtime_setting(setting_key, actor='dashboard')
        if result.get('status') == 'error':
            ui.notify(result.get('message', 'Runtime setting reset failed'), color='negative')
            return
        _clear_runtime_setting_draft(setting_key)
        ui.notify('Runtime setting reset to environment default', color='positive')
        _refresh_all_dashboard_sections()
|
||||
|
||||
def init_db_action() -> None:
|
||||
result = init_db()
|
||||
ui.notify(result.get('message', 'Database initialized'), color='positive' if result.get('status') == 'success' else 'negative')
|
||||
@@ -1244,13 +1406,16 @@ def create_dashboard():
|
||||
commit_lookup_query = _selected_commit_lookup()
|
||||
discovered_repositories = _get_discovered_repositories()
|
||||
prompt_settings = settings.editable_llm_prompts
|
||||
runtime_settings = settings.editable_runtime_settings
|
||||
db = get_db_sync()
|
||||
if db is not None:
|
||||
with closing(db):
|
||||
try:
|
||||
prompt_settings = DatabaseManager(db).get_llm_prompt_settings()
|
||||
runtime_settings = DatabaseManager(db).get_runtime_settings()
|
||||
except Exception:
|
||||
prompt_settings = settings.editable_llm_prompts
|
||||
runtime_settings = settings.editable_runtime_settings
|
||||
if snapshot.get('error'):
|
||||
return {
|
||||
'error': snapshot['error'],
|
||||
@@ -1262,6 +1427,7 @@ def create_dashboard():
|
||||
'commit_lookup_query': commit_lookup_query,
|
||||
'discovered_repositories': discovered_repositories,
|
||||
'prompt_settings': prompt_settings,
|
||||
'runtime_settings': runtime_settings,
|
||||
}
|
||||
projects = snapshot['projects']
|
||||
all_llm_traces = [trace for project_bundle in projects for trace in project_bundle.get('llm_traces', [])]
|
||||
@@ -1281,6 +1447,7 @@ def create_dashboard():
|
||||
'commit_context': _load_commit_context(commit_lookup_query, branch_scope_filter) if commit_lookup_query else None,
|
||||
'discovered_repositories': discovered_repositories,
|
||||
'prompt_settings': prompt_settings,
|
||||
'runtime_settings': runtime_settings,
|
||||
'llm_stage_options': [''] + sorted({trace.get('stage') for trace in all_llm_traces if trace.get('stage')}),
|
||||
'llm_model_options': [''] + sorted({trace.get('model') for trace in all_llm_traces if trace.get('model')}),
|
||||
'project_repository_map': {
|
||||
@@ -1337,6 +1504,7 @@ def create_dashboard():
|
||||
('Completed', summary['completed_projects'], 'Finished project runs'),
|
||||
('Prompts', summary['prompt_events'], 'Recorded originating prompts'),
|
||||
('Open PRs', summary['open_pull_requests'], 'Unmerged review branches'),
|
||||
('Orphans', summary.get('orphan_code_changes', 0), 'Generated diffs with no matching commit'),
|
||||
]
|
||||
for title, value, subtitle in metrics:
|
||||
with ui.card().classes('factory-kpi'):
|
||||
@@ -1355,15 +1523,38 @@ def create_dashboard():
|
||||
with ui.grid(columns=2).classes('w-full gap-4'):
|
||||
with ui.card().classes('factory-panel q-pa-lg'):
|
||||
ui.label('Project Pipeline').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
|
||||
if summary.get('orphan_code_changes'):
|
||||
with ui.card().classes('q-pa-md q-mt-md').style('background: #fff4dd; border: 1px solid #e0b36a;'):
|
||||
ui.label('Uncommitted generated changes detected').style('font-weight: 700; color: #7a4b16;')
|
||||
ui.label(
|
||||
f"{summary['orphan_code_changes']} generated file change row(s) have no matching git commit or PR delivery record."
|
||||
).classes('factory-muted')
|
||||
ui.button(
|
||||
'Purge orphan change rows',
|
||||
on_click=lambda: _render_confirmation_dialog(
|
||||
'Purge orphaned generated change rows?',
|
||||
'Delete only generated CODE_CHANGE audit rows that have no matching git commit. Valid prompt, commit, and PR history will be kept.',
|
||||
'Purge Orphans',
|
||||
lambda: purge_orphan_code_changes_action(),
|
||||
color='warning',
|
||||
),
|
||||
).props('outline color=warning').classes('q-mt-sm')
|
||||
if projects:
|
||||
for project_bundle in projects[:4]:
|
||||
project = project_bundle['project']
|
||||
with ui.column().classes('gap-1 q-mt-md'):
|
||||
with ui.row().classes('justify-between items-center'):
|
||||
ui.label(project['project_name']).style('font-weight: 700; color: #2f241d;')
|
||||
with ui.row().classes('items-center gap-2'):
|
||||
if project.get('delivery_status') in {'uncommitted', 'local_only', 'pushed_no_pr'}:
|
||||
ui.label(project.get('delivery_status', 'delivery')).classes('factory-chip')
|
||||
ui.label(project['status']).classes('factory-chip')
|
||||
ui.linear_progress(value=(project['progress'] or 0) / 100, show_value=False).classes('w-full')
|
||||
ui.label(project['message'] or 'No status message').classes('factory-muted')
|
||||
ui.label(
|
||||
project.get('delivery_message')
|
||||
if project.get('delivery_status') in {'uncommitted', 'local_only', 'pushed_no_pr'}
|
||||
else project['message'] or 'No status message'
|
||||
).classes('factory-muted')
|
||||
else:
|
||||
ui.label('No projects in the database yet.').classes('factory-muted')
|
||||
|
||||
@@ -1393,7 +1584,12 @@ def create_dashboard():
|
||||
ui.label('No project data available yet.').classes('factory-muted')
|
||||
for project_bundle in projects:
|
||||
project = project_bundle['project']
|
||||
with ui.expansion(f"{project['project_name']} · {project['status']}", icon='folder').classes('factory-panel w-full q-mb-md'):
|
||||
with _sticky_expansion(
|
||||
f"projects.{project['project_id']}",
|
||||
f"{project['project_name']} · {project['status']}",
|
||||
icon='folder',
|
||||
classes='factory-panel w-full q-mb-md',
|
||||
):
|
||||
with ui.row().classes('items-center gap-2 q-pa-md'):
|
||||
ui.button(
|
||||
'Archive',
|
||||
@@ -1414,6 +1610,28 @@ def create_dashboard():
|
||||
lambda: delete_project_action(project_id),
|
||||
),
|
||||
).props('outline color=negative')
|
||||
if project.get('delivery_status') in {'uncommitted', 'local_only', 'pushed_no_pr'}:
|
||||
with ui.card().classes('q-ma-md q-pa-md').style('background: #fff4dd; border: 1px solid #e0b36a;'):
|
||||
with ui.row().classes('items-center justify-between w-full gap-3'):
|
||||
with ui.column().classes('gap-1'):
|
||||
ui.label('Remote delivery attention needed').style('font-weight: 700; color: #7a4b16;')
|
||||
ui.label(project.get('delivery_message') or 'Generated changes were not published to the tracked repository.').classes('factory-muted')
|
||||
with ui.row().classes('items-center gap-2'):
|
||||
ui.button(
|
||||
'Retry delivery',
|
||||
on_click=lambda _=None, project_id=project['project_id']: retry_project_delivery_action(project_id),
|
||||
).props('outline color=positive')
|
||||
if project.get('delivery_status') == 'uncommitted':
|
||||
ui.button(
|
||||
'Purge project orphan rows',
|
||||
on_click=lambda _=None, project_id=project['project_id']: _render_confirmation_dialog(
|
||||
'Purge orphaned generated change rows for this project?',
|
||||
'Delete only generated CODE_CHANGE audit rows for this project that have no matching git commit. Valid history remains intact.',
|
||||
'Purge Project Orphans',
|
||||
lambda: purge_orphan_code_changes_action(project_id),
|
||||
color='warning',
|
||||
),
|
||||
).props('outline color=warning')
|
||||
with ui.grid(columns=2).classes('w-full gap-4 q-pa-md'):
|
||||
with ui.card().classes('q-pa-md'):
|
||||
ui.label('Repository').style('font-weight: 700; color: #3a281a;')
|
||||
@@ -1422,6 +1640,9 @@ def create_dashboard():
|
||||
'Sync Repo Activity',
|
||||
on_click=lambda _=None, project_id=project['project_id']: sync_project_repository_action(project_id),
|
||||
).props('outline color=secondary').classes('q-mt-md')
|
||||
with ui.card().classes('q-pa-md'):
|
||||
ui.label('Generation Diagnostics').style('font-weight: 700; color: #3a281a;')
|
||||
_render_generation_diagnostics(project_bundle.get('ui_data'))
|
||||
|
||||
@ui.refreshable
|
||||
def render_archived_panel() -> None:
|
||||
@@ -1438,7 +1659,12 @@ def create_dashboard():
|
||||
ui.label('No archived projects yet.').classes('factory-muted')
|
||||
for project_bundle in archived_projects:
|
||||
project = project_bundle['project']
|
||||
with ui.expansion(f"{project['project_name']} · archived", icon='archive').classes('factory-panel w-full q-mb-md'):
|
||||
with _sticky_expansion(
|
||||
f"archived.{project['project_id']}",
|
||||
f"{project['project_name']} · archived",
|
||||
icon='archive',
|
||||
classes='factory-panel w-full q-mb-md',
|
||||
):
|
||||
with ui.row().classes('items-center gap-2 q-pa-md'):
|
||||
ui.button(
|
||||
'Restore',
|
||||
@@ -1459,10 +1685,33 @@ def create_dashboard():
|
||||
lambda: delete_project_action(project_id),
|
||||
),
|
||||
).props('outline color=negative')
|
||||
if project.get('delivery_status') in {'uncommitted', 'local_only', 'pushed_no_pr'}:
|
||||
with ui.card().classes('q-ma-md q-pa-md').style('background: #fff4dd; border: 1px solid #e0b36a;'):
|
||||
ui.label('Archived project needs delivery attention').style('font-weight: 700; color: #7a4b16;')
|
||||
ui.label(project.get('delivery_message') or 'Generated changes were not published to the tracked repository.').classes('factory-muted')
|
||||
with ui.row().classes('items-center gap-2 q-mt-sm'):
|
||||
ui.button(
|
||||
'Retry delivery',
|
||||
on_click=lambda _=None, project_id=project['project_id']: retry_project_delivery_action(project_id),
|
||||
).props('outline color=positive')
|
||||
if project.get('delivery_status') == 'uncommitted':
|
||||
ui.button(
|
||||
'Purge archived project orphan rows',
|
||||
on_click=lambda _=None, project_id=project['project_id']: _render_confirmation_dialog(
|
||||
'Purge orphaned generated change rows for this archived project?',
|
||||
'Delete only generated CODE_CHANGE audit rows for this project that have no matching git commit. Valid history remains intact.',
|
||||
'Purge Archived Orphans',
|
||||
lambda: purge_orphan_code_changes_action(project_id),
|
||||
color='warning',
|
||||
),
|
||||
).props('outline color=warning')
|
||||
with ui.grid(columns=2).classes('w-full gap-4 q-pa-md'):
|
||||
with ui.card().classes('q-pa-md'):
|
||||
ui.label('Repository').style('font-weight: 700; color: #3a281a;')
|
||||
_render_repository_block(project_bundle.get('repository') or project.get('repository'))
|
||||
with ui.card().classes('q-pa-md'):
|
||||
ui.label('Generation Diagnostics').style('font-weight: 700; color: #3a281a;')
|
||||
_render_generation_diagnostics(project_bundle.get('ui_data'))
|
||||
with ui.card().classes('q-pa-md'):
|
||||
ui.label('Prompt').style('font-weight: 700; color: #3a281a;')
|
||||
prompts = project_bundle.get('prompts', [])
|
||||
@@ -1645,7 +1894,12 @@ def create_dashboard():
|
||||
if projects:
|
||||
for project_bundle in projects:
|
||||
project = project_bundle['project']
|
||||
with ui.expansion(f"{project['project_name']} · {project['project_id']}", icon='schedule').classes('q-mt-md w-full'):
|
||||
with _sticky_expansion(
|
||||
f"timeline.{project['project_id']}",
|
||||
f"{project['project_name']} · {project['project_id']}",
|
||||
icon='schedule',
|
||||
classes='q-mt-md w-full',
|
||||
):
|
||||
_render_timeline(_filter_timeline_events(project_bundle.get('timeline', []), branch_scope_filter))
|
||||
else:
|
||||
ui.label('No project timelines recorded yet.').classes('factory-muted')
|
||||
@@ -1660,6 +1914,7 @@ def create_dashboard():
|
||||
llm_runtime = view_model['llm_runtime']
|
||||
discovered_repositories = view_model['discovered_repositories']
|
||||
prompt_settings = view_model.get('prompt_settings', [])
|
||||
runtime_settings = view_model.get('runtime_settings', [])
|
||||
with ui.grid(columns=2).classes('w-full gap-4'):
|
||||
with ui.card().classes('factory-panel q-pa-lg'):
|
||||
ui.label('System Logs').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
|
||||
@@ -1710,6 +1965,45 @@ def create_dashboard():
|
||||
for label, text in system_prompts.items():
|
||||
ui.label(label.replace('_', ' ').title()).classes('factory-muted q-mt-sm')
|
||||
ui.label(text or 'Not configured').classes('factory-code')
|
||||
with ui.card().classes('factory-panel q-pa-lg'):
|
||||
ui.label('Home Assistant and Queue Settings').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
|
||||
ui.label('Keep only the Home Assistant base URL and access token in the environment. Entity ids, thresholds, and queue behavior are edited here and persisted in the database.').classes('factory-muted')
|
||||
for setting in runtime_settings:
|
||||
with ui.card().classes('q-pa-sm q-mt-md'):
|
||||
with ui.row().classes('items-center justify-between w-full'):
|
||||
with ui.column().classes('gap-1'):
|
||||
ui.label(setting['label']).style('font-weight: 700; color: #2f241d;')
|
||||
ui.label(setting.get('description') or '').classes('factory-muted')
|
||||
with ui.row().classes('items-center gap-2'):
|
||||
ui.label(setting.get('category', 'setting')).classes('factory-chip')
|
||||
ui.label(setting.get('source', 'environment')).classes('factory-chip')
|
||||
draft_value = _runtime_setting_draft_value(setting['key'], setting.get('value'))
|
||||
if setting.get('value_type') == 'boolean':
|
||||
ui.switch(
|
||||
value=bool(draft_value),
|
||||
on_change=lambda event, setting_key=setting['key']: _store_runtime_setting_draft(setting_key, bool(event.value)),
|
||||
).props('color=accent').classes('q-mt-sm')
|
||||
elif setting.get('value_type') == 'integer':
|
||||
ui.number(
|
||||
value=int(draft_value),
|
||||
on_change=lambda event, setting_key=setting['key']: _store_runtime_setting_draft(setting_key, int(event.value) if event.value is not None else None),
|
||||
).classes('w-full q-mt-sm')
|
||||
elif setting.get('value_type') == 'float':
|
||||
ui.number(
|
||||
value=float(draft_value),
|
||||
on_change=lambda event, setting_key=setting['key']: _store_runtime_setting_draft(setting_key, float(event.value) if event.value is not None else None),
|
||||
).classes('w-full q-mt-sm')
|
||||
else:
|
||||
ui.input(
|
||||
value=str(draft_value or ''),
|
||||
on_change=lambda event, setting_key=setting['key']: _store_runtime_setting_draft(setting_key, event.value or ''),
|
||||
).classes('w-full q-mt-sm')
|
||||
ui.label(f"Environment default: {setting.get('default_value')}").classes('factory-muted q-mt-sm')
|
||||
if setting.get('updated_at'):
|
||||
ui.label(f"Last updated: {setting['updated_at']} by {setting.get('updated_by') or 'unknown'}").classes('factory-muted q-mt-sm')
|
||||
with ui.row().classes('items-center gap-2 q-mt-md'):
|
||||
ui.button('Save Override', on_click=lambda _=None, setting_key=setting['key']: save_runtime_setting_action(setting_key)).props('unelevated color=accent')
|
||||
ui.button('Reset To Default', on_click=lambda _=None, setting_key=setting['key']: reset_runtime_setting_action(setting_key)).props('outline color=warning')
|
||||
with ui.card().classes('factory-panel q-pa-lg'):
|
||||
ui.label('Editable LLM Prompts').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
|
||||
ui.label('These guardrails and system prompts are persisted in the database and override environment defaults until reset.').classes('factory-muted')
|
||||
@@ -1831,7 +2125,8 @@ def create_dashboard():
|
||||
_update_dashboard_state()
|
||||
panel_refreshers['metrics']()
|
||||
active_tab = _selected_tab_name()
|
||||
if active_tab in panel_refreshers:
|
||||
# Avoid rebuilding the more interactive tabs on the timer; manual refresh keeps them current.
|
||||
if active_tab in {'overview', 'health'} and active_tab in panel_refreshers:
|
||||
panel_refreshers[active_tab]()
|
||||
|
||||
def _refresh_all_dashboard_sections() -> None:
|
||||
|
||||
@@ -6,7 +6,7 @@ from urllib.parse import urlparse
|
||||
|
||||
from alembic import command
|
||||
from alembic.config import Config
|
||||
from sqlalchemy import create_engine, event, text
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
|
||||
@@ -64,20 +64,6 @@ def get_engine() -> Engine:
|
||||
pool_timeout=settings.DB_POOL_TIMEOUT or 30
|
||||
)
|
||||
|
||||
# Event listener for connection checkout (PostgreSQL only)
|
||||
if not settings.use_sqlite:
|
||||
@event.listens_for(engine, "checkout")
|
||||
def receive_checkout(dbapi_connection, connection_record, connection_proxy):
|
||||
"""Log connection checkout for audit purposes."""
|
||||
if settings.LOG_LEVEL in ("DEBUG", "INFO"):
|
||||
print(f"DB Connection checked out from pool")
|
||||
|
||||
@event.listens_for(engine, "checkin")
|
||||
def receive_checkin(dbapi_connection, connection_record):
|
||||
"""Log connection checkin for audit purposes."""
|
||||
if settings.LOG_LEVEL == "DEBUG":
|
||||
print(f"DB Connection returned to pool")
|
||||
|
||||
return engine
|
||||
|
||||
|
||||
|
||||
@@ -62,8 +62,6 @@ async def lifespan(_app: FastAPI):
|
||||
print(
|
||||
f"Runtime configuration: database_backend={runtime['backend']} target={runtime['target']}"
|
||||
)
|
||||
queue_worker = None
|
||||
if database_module.settings.prompt_queue_enabled and database_module.settings.prompt_queue_auto_process:
|
||||
queue_worker = asyncio.create_task(_prompt_queue_worker())
|
||||
try:
|
||||
yield
|
||||
@@ -124,6 +122,12 @@ class LLMPromptSettingUpdateRequest(BaseModel):
|
||||
value: str = Field(default='')
|
||||
|
||||
|
||||
class RuntimeSettingUpdateRequest(BaseModel):
|
||||
"""Request body for persisting one editable runtime setting override."""
|
||||
|
||||
value: str | bool | int | float | None = None
|
||||
|
||||
|
||||
class GiteaRepositoryOnboardRequest(BaseModel):
|
||||
"""Request body for onboarding a manually created Gitea repository."""
|
||||
|
||||
@@ -237,6 +241,17 @@ def _serialize_project_log(log: ProjectLog) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def _ensure_summary_mentions_pull_request(summary_message: str, pull_request: dict | None) -> str:
|
||||
"""Append the pull request URL to chat summaries when one exists."""
|
||||
if not isinstance(pull_request, dict):
|
||||
return summary_message
|
||||
pr_url = (pull_request.get('pr_url') or '').strip()
|
||||
if not pr_url or pr_url in summary_message:
|
||||
return summary_message
|
||||
separator = '' if summary_message.endswith(('.', '!', '?')) else '.'
|
||||
return f"{summary_message}{separator} Review PR: {pr_url}"
|
||||
|
||||
|
||||
def _serialize_system_log(log: SystemLog) -> dict:
|
||||
"""Serialize a system log row."""
|
||||
return {
|
||||
@@ -302,7 +317,7 @@ async def _run_generation(
|
||||
resolved_prompt_text = prompt_text or _compose_prompt_text(request)
|
||||
if preferred_project_id and reusable_history is not None:
|
||||
project_id = reusable_history.project_id
|
||||
elif reusable_history and not is_explicit_new_project and manager.get_open_pull_request(project_id=reusable_history.project_id):
|
||||
elif reusable_history and not is_explicit_new_project:
|
||||
project_id = reusable_history.project_id
|
||||
else:
|
||||
if is_explicit_new_project or prompt_text:
|
||||
@@ -344,6 +359,8 @@ async def _run_generation(
|
||||
response_data = _serialize_project(history)
|
||||
response_data['logs'] = [_serialize_project_log(log) for log in project_logs]
|
||||
response_data['ui_data'] = result.get('ui_data')
|
||||
response_data['generation_debug'] = ((result.get('ui_data') or {}).get('generation_debug'))
|
||||
response_data['git_debug'] = ((result.get('ui_data') or {}).get('git'))
|
||||
response_data['features'] = request.features
|
||||
response_data['tech_stack'] = request.tech_stack
|
||||
response_data['project_root'] = result.get('project_root', str(_project_root(project_id)))
|
||||
@@ -385,6 +402,7 @@ async def _run_generation(
|
||||
'logs': [log.get('message', '') for log in response_data.get('logs', []) if isinstance(log, dict)],
|
||||
}
|
||||
summary_message, summary_trace = await ChangeSummaryGenerator().summarize_with_trace(summary_context)
|
||||
summary_message = _ensure_summary_mentions_pull_request(summary_message, response_data.get('pull_request'))
|
||||
if orchestrator.db_manager and orchestrator.history and orchestrator.prompt_audit:
|
||||
orchestrator.db_manager.log_llm_trace(
|
||||
project_id=project_id,
|
||||
@@ -400,8 +418,18 @@ async def _run_generation(
|
||||
fallback_used=summary_trace.get('fallback_used', False),
|
||||
)
|
||||
response_data['summary_message'] = summary_message
|
||||
response_data['summary_metadata'] = {
|
||||
'provider': summary_trace.get('provider'),
|
||||
'model': summary_trace.get('model'),
|
||||
'fallback_used': bool(summary_trace.get('fallback_used')),
|
||||
}
|
||||
response_data['pull_request'] = result.get('pull_request') or manager.get_open_pull_request(project_id=project_id)
|
||||
return {'status': result['status'], 'data': response_data, 'summary_message': summary_message}
|
||||
return {
|
||||
'status': result['status'],
|
||||
'data': response_data,
|
||||
'summary_message': summary_message,
|
||||
'summary_metadata': response_data['summary_metadata'],
|
||||
}
|
||||
|
||||
|
||||
def _project_root(project_id: str) -> Path:
|
||||
@@ -681,6 +709,7 @@ async def _prompt_queue_worker() -> None:
|
||||
"""Background worker that drains the prompt queue when the energy gate opens."""
|
||||
while True:
|
||||
try:
|
||||
if database_module.settings.prompt_queue_enabled and database_module.settings.prompt_queue_auto_process:
|
||||
await _process_prompt_queue_batch(
|
||||
limit=database_module.settings.prompt_queue_max_batch_size,
|
||||
force=database_module.settings.prompt_queue_force_process,
|
||||
@@ -719,6 +748,8 @@ def read_api_info():
|
||||
'/llm/runtime',
|
||||
'/llm/prompts',
|
||||
'/llm/prompts/{prompt_key}',
|
||||
'/settings/runtime',
|
||||
'/settings/runtime/{setting_key}',
|
||||
'/generate',
|
||||
'/generate/text',
|
||||
'/queue',
|
||||
@@ -798,6 +829,7 @@ def get_llm_prompt_settings(db: DbSession):
|
||||
@app.put('/llm/prompts/{prompt_key}')
|
||||
def update_llm_prompt_setting(prompt_key: str, request: LLMPromptSettingUpdateRequest, db: DbSession):
|
||||
"""Persist one editable LLM prompt override into the database."""
|
||||
database_module.init_db()
|
||||
result = DatabaseManager(db).save_llm_prompt_setting(prompt_key, request.value, actor='api')
|
||||
if result.get('status') == 'error':
|
||||
raise HTTPException(status_code=400, detail=result.get('message', 'Prompt save failed'))
|
||||
@@ -807,12 +839,39 @@ def update_llm_prompt_setting(prompt_key: str, request: LLMPromptSettingUpdateRe
|
||||
@app.delete('/llm/prompts/{prompt_key}')
|
||||
def reset_llm_prompt_setting(prompt_key: str, db: DbSession):
|
||||
"""Reset one editable LLM prompt override back to the environment/default value."""
|
||||
database_module.init_db()
|
||||
result = DatabaseManager(db).reset_llm_prompt_setting(prompt_key, actor='api')
|
||||
if result.get('status') == 'error':
|
||||
raise HTTPException(status_code=400, detail=result.get('message', 'Prompt reset failed'))
|
||||
return result
|
||||
|
||||
|
||||
@app.get('/settings/runtime')
|
||||
def get_runtime_settings(db: DbSession):
|
||||
"""Return editable runtime settings with DB overrides merged over environment defaults."""
|
||||
return {'settings': DatabaseManager(db).get_runtime_settings()}
|
||||
|
||||
|
||||
@app.put('/settings/runtime/{setting_key}')
|
||||
def update_runtime_setting(setting_key: str, request: RuntimeSettingUpdateRequest, db: DbSession):
|
||||
"""Persist one editable runtime setting override into the database."""
|
||||
database_module.init_db()
|
||||
result = DatabaseManager(db).save_runtime_setting(setting_key, request.value, actor='api')
|
||||
if result.get('status') == 'error':
|
||||
raise HTTPException(status_code=400, detail=result.get('message', 'Runtime setting save failed'))
|
||||
return result
|
||||
|
||||
|
||||
@app.delete('/settings/runtime/{setting_key}')
|
||||
def reset_runtime_setting(setting_key: str, db: DbSession):
|
||||
"""Reset one editable runtime setting override back to the environment/default value."""
|
||||
database_module.init_db()
|
||||
result = DatabaseManager(db).reset_runtime_setting(setting_key, actor='api')
|
||||
if result.get('status') == 'error':
|
||||
raise HTTPException(status_code=400, detail=result.get('message', 'Runtime setting reset failed'))
|
||||
return result
|
||||
|
||||
|
||||
@app.post('/generate')
|
||||
async def generate_software(request: SoftwareRequest, db: DbSession):
|
||||
"""Create and record a software-generation request."""
|
||||
|
||||
Reference in New Issue
Block a user