Compare commits
36 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| d53f3fe207 | |||
| 4f1d757dd8 | |||
| ac75cc2e3a | |||
| f7f00d4e14 | |||
| 1c539d5f60 | |||
| 64fcd2967c | |||
| 4d050ff527 | |||
| 1944e2a9cf | |||
| 7e4066c609 | |||
| 4eeec5d808 | |||
| cbbed83915 | |||
| 1e72bc9a28 | |||
| b0c95323fd | |||
| d60e753acf | |||
| 94c38359c7 | |||
| 2943fc79ab | |||
| 3e40338bbf | |||
| 39f9651236 | |||
| 3175c53504 | |||
| 29cf2aa6bd | |||
| b881ef635a | |||
| e35db0a361 | |||
| 798bb218f8 | |||
| 3d77ac3104 | |||
| f6681a0f85 | |||
| ed8dc48280 | |||
| c3cf8da42d | |||
| e495775b91 | |||
| 356c388efb | |||
| fd812476cc | |||
| 032139c14f | |||
| 194d5658a6 | |||
| b9faac8d16 | |||
| 80d7716e65 | |||
| 321bf74aef | |||
| 55ee75106c |
@@ -12,7 +12,10 @@ WORKDIR /app
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
&& update-ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install dependencies
|
||||
|
||||
175
HISTORY.md
175
HISTORY.md
@@ -5,10 +5,185 @@ Changelog
|
||||
(unreleased)
|
||||
------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More git integration fixes, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
|
||||
0.9.9 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add missing git binary, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.8 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More file change fixes, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.7 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- More file generation improvements, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.6 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Repo onboarding fix, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.5 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better code generation, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.4 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add commit retry, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.3 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better home assistant integration, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.2 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- UI improvements and prompt hardening, refs NOISSUE. [Simon
|
||||
Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.1 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better repo name generation, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.9.0 (2026-04-11)
|
||||
------------------
|
||||
- Feat: editable guardrails, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
|
||||
0.8.0 (2026-04-11)
|
||||
------------------
|
||||
- Feat: better dashboard reloading mechanism, refs NOISSUE. [Simon
|
||||
Diesenreiter]
|
||||
- Feat: add explicit workflow steps and guardrail prompts, refs NOISSUE.
|
||||
[Simon Diesenreiter]
|
||||
|
||||
|
||||
0.7.1 (2026-04-11)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add additional deletion confirmation, refs NOISSUE. [Simon
|
||||
Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.7.0 (2026-04-10)
|
||||
------------------
|
||||
- Feat: gitea issue integration, refs NOISSUE. [Simon Diesenreiter]
|
||||
- Feat: better history data, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
|
||||
0.6.5 (2026-04-10)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Better n8n workflow, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.6.4 (2026-04-10)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Add Telegram helper functions, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.6.3 (2026-04-10)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- N8n workflow generation, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.6.2 (2026-04-10)
|
||||
------------------
|
||||
|
||||
Fix
|
||||
~~~
|
||||
- Fix Quasar layout issues, refs NOISSUE. [Simon Diesenreiter]
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
|
||||
|
||||
0.6.1 (2026-04-10)
|
||||
------------------
|
||||
|
||||
28
README.md
28
README.md
@@ -48,6 +48,7 @@ OLLAMA_URL=http://localhost:11434
|
||||
OLLAMA_MODEL=llama3
|
||||
|
||||
# Gitea
|
||||
# Host-only values such as git.disi.dev are normalized to https://git.disi.dev.
|
||||
GITEA_URL=https://gitea.yourserver.com
|
||||
GITEA_TOKEN=your_gitea_api_token
|
||||
GITEA_OWNER=ai-software-factory
|
||||
@@ -69,6 +70,12 @@ N8N_WEBHOOK_URL=http://n8n.yourserver.com/webhook/telegram
|
||||
# Telegram
|
||||
TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Optional: Home Assistant integration.
|
||||
# Only the base URL and token are required in the environment.
|
||||
# Entity ids, thresholds, and queue behavior can be configured from the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
```
|
||||
|
||||
### Build and Run
|
||||
@@ -86,11 +93,15 @@ docker-compose up -d
|
||||
1. **Send a request via Telegram:**
|
||||
|
||||
```
|
||||
Name: My Awesome App
|
||||
Description: A web application for managing tasks
|
||||
Features: user authentication, task CRUD, notifications
|
||||
Build an internal task management app for our operations team.
|
||||
It should support user authentication, task CRUD, notifications, and reporting.
|
||||
Prefer FastAPI with PostgreSQL and a simple web dashboard.
|
||||
```
|
||||
|
||||
The backend now interprets free-form Telegram text with Ollama before generation.
|
||||
If `TELEGRAM_CHAT_ID` is set, the Telegram-trigger workflow only reacts to messages from that specific chat.
|
||||
If queueing is enabled from the dashboard System tab, Telegram prompts are stored in a durable queue and processed only when the configured Home Assistant battery and surplus thresholds are satisfied, unless you force processing via `/queue/process` or send `process_now=true`.
|
||||
|
||||
2. **Monitor progress via Web UI:**
|
||||
|
||||
Open `http://yourserver:8000/` to see the dashboard and `http://yourserver:8000/api` for API metadata
|
||||
@@ -101,6 +112,16 @@ docker-compose up -d
|
||||
|
||||
If you deploy the container with PostgreSQL environment variables set, the service now selects PostgreSQL automatically even though SQLite remains the default for local/test usage.
|
||||
|
||||
The health tab now shows separate application, n8n, Gitea, and Home Assistant/queue diagnostics so misconfigured integrations are visible without checking container logs.
|
||||
|
||||
The dashboard Health tab exposes operator controls for the prompt queue, including manual batch processing, forced processing, and retrying failed items.
|
||||
|
||||
The dashboard System tab now also stores Home Assistant entity ids, queue toggles, thresholds, and batch settings in the database, so the environment only needs `HOME_ASSISTANT_URL` and `HOME_ASSISTANT_TOKEN` for that integration.
|
||||
|
||||
Projects that show `uncommitted`, `local_only`, or `pushed_no_pr` delivery warnings in the dashboard can now be retried in place from the UI before resorting to purging orphan audit rows.
|
||||
|
||||
Guardrail and system prompts are no longer environment-only in practice: the factory can persist DB-backed overrides for the editable LLM prompt set, expose them at `/llm/prompts`, and edit them from the dashboard System tab. Environment values still act as defaults and as the reset target.
|
||||
|
||||
## API Endpoints
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
@@ -109,6 +130,7 @@ If you deploy the container with PostgreSQL environment variables set, the servi
|
||||
| `/api` | GET | API information |
|
||||
| `/health` | GET | Health check |
|
||||
| `/generate` | POST | Generate new software |
|
||||
| `/generate/text` | POST | Interpret free-form text and generate software |
|
||||
| `/status/{project_id}` | GET | Get project status |
|
||||
| `/projects` | GET | List all projects |
|
||||
|
||||
|
||||
@@ -8,10 +8,23 @@ LOG_LEVEL=INFO
|
||||
# Ollama
|
||||
OLLAMA_URL=http://localhost:11434
|
||||
OLLAMA_MODEL=llama3
|
||||
LLM_GUARDRAIL_PROMPT=You are operating inside AI Software Factory. Follow supplied schemas exactly and treat service-provided tool outputs as authoritative.
|
||||
LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT=Never route work to archived projects and only reference issues that are explicit in the prompt or supplied tool outputs.
|
||||
LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT=Only summarize delivery facts that appear in the provided project context or tool outputs.
|
||||
LLM_PROJECT_NAMING_GUARDRAIL_PROMPT=Prefer clear product names and repository slugs that reflect the new request without colliding with tracked projects.
|
||||
LLM_PROJECT_NAMING_SYSTEM_PROMPT=Return JSON with project_name, repo_name, and rationale for new projects.
|
||||
LLM_PROJECT_ID_GUARDRAIL_PROMPT=Prefer short stable project ids and avoid collisions with existing project ids.
|
||||
LLM_PROJECT_ID_SYSTEM_PROMPT=Return JSON with project_id and rationale for new projects.
|
||||
LLM_TOOL_ALLOWLIST=gitea_project_catalog,gitea_project_state,gitea_project_issues,gitea_pull_requests
|
||||
LLM_TOOL_CONTEXT_LIMIT=5
|
||||
LLM_LIVE_TOOL_ALLOWLIST=gitea_lookup_issue,gitea_lookup_pull_request
|
||||
LLM_LIVE_TOOL_STAGE_ALLOWLIST=request_interpretation,change_summary
|
||||
LLM_LIVE_TOOL_STAGE_TOOL_MAP={"request_interpretation": ["gitea_lookup_issue", "gitea_lookup_pull_request"], "change_summary": []}
|
||||
LLM_MAX_TOOL_CALL_ROUNDS=1
|
||||
|
||||
# Gitea
|
||||
# Configure Gitea API for your organization
|
||||
# GITEA_URL can be left empty to use GITEA_ORGANIZATION instead of GITEA_OWNER
|
||||
# Host-only values such as git.disi.dev are normalized to https://git.disi.dev automatically.
|
||||
GITEA_URL=https://gitea.yourserver.com
|
||||
GITEA_TOKEN=your_gitea_api_token
|
||||
GITEA_OWNER=your_organization_name
|
||||
@@ -29,6 +42,12 @@ N8N_PASSWORD=your_secure_password
|
||||
TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Home Assistant energy gate for queued Telegram prompts
|
||||
# Only the base URL and token are environment-backed.
|
||||
# Queue toggles, entity ids, thresholds, and batch sizing can be edited in the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
|
||||
# PostgreSQL
|
||||
# In production, provide PostgreSQL settings below. They now take precedence over the SQLite default.
|
||||
# You can also set USE_SQLITE=false explicitly if you want the intent to be obvious.
|
||||
|
||||
@@ -6,6 +6,7 @@ Automated software generation service powered by Ollama LLM. This service allows
|
||||
|
||||
- **Telegram Integration**: Receive software requests via Telegram bot
|
||||
- **Ollama LLM**: Uses Ollama-hosted models for code generation
|
||||
- **LLM Guardrails and Tools**: Centralized guardrail prompts plus mediated tool payloads for project, Gitea, PR, and issue context
|
||||
- **Git Integration**: Automatically commits code to gitea
|
||||
- **Pull Requests**: Creates PRs for user review before merging
|
||||
- **Web UI**: Beautiful dashboard for monitoring project progress
|
||||
@@ -46,12 +47,26 @@ PORT=8000
|
||||
# Ollama
|
||||
OLLAMA_URL=http://localhost:11434
|
||||
OLLAMA_MODEL=llama3
|
||||
LLM_GUARDRAIL_PROMPT=You are operating inside AI Software Factory. Follow supplied schemas exactly and treat service-provided tool outputs as authoritative.
|
||||
LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT=Never route work to archived projects and only reference issues that are explicit in the prompt or supplied tool outputs.
|
||||
LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT=Only summarize delivery facts that appear in the provided project context or tool outputs.
|
||||
LLM_PROJECT_NAMING_GUARDRAIL_PROMPT=Prefer clear product names and repository slugs that reflect the new request without colliding with tracked projects.
|
||||
LLM_PROJECT_NAMING_SYSTEM_PROMPT=Return JSON with project_name, repo_name, and rationale for new projects.
|
||||
LLM_PROJECT_ID_GUARDRAIL_PROMPT=Prefer short stable project ids and avoid collisions with existing project ids.
|
||||
LLM_PROJECT_ID_SYSTEM_PROMPT=Return JSON with project_id and rationale for new projects.
|
||||
LLM_TOOL_ALLOWLIST=gitea_project_catalog,gitea_project_state,gitea_project_issues,gitea_pull_requests
|
||||
LLM_TOOL_CONTEXT_LIMIT=5
|
||||
LLM_LIVE_TOOL_ALLOWLIST=gitea_lookup_issue,gitea_lookup_pull_request
|
||||
LLM_LIVE_TOOL_STAGE_ALLOWLIST=request_interpretation,change_summary
|
||||
LLM_LIVE_TOOL_STAGE_TOOL_MAP={"request_interpretation": ["gitea_lookup_issue", "gitea_lookup_pull_request"], "change_summary": []}
|
||||
LLM_MAX_TOOL_CALL_ROUNDS=1
|
||||
|
||||
# Gitea
|
||||
# Host-only values such as git.disi.dev are normalized to https://git.disi.dev.
|
||||
GITEA_URL=https://gitea.yourserver.com
|
||||
GITEA_TOKEN= analyze your_gitea_api_token
|
||||
GITEA_TOKEN=your_gitea_api_token
|
||||
GITEA_OWNER=ai-software-factory
|
||||
GITEA_REPO=ai-software-factory
|
||||
GITEA_REPO=
|
||||
|
||||
# n8n
|
||||
N8N_WEBHOOK_URL=http://n8n.yourserver.com/webhook/telegram
|
||||
@@ -59,6 +74,12 @@ N8N_WEBHOOK_URL=http://n8n.yourserver.com/webhook/telegram
|
||||
# Telegram
|
||||
TELEGRAM_BOT_TOKEN=your_telegram_bot_token
|
||||
TELEGRAM_CHAT_ID=your_chat_id
|
||||
|
||||
# Optional: Home Assistant integration.
|
||||
# Only the base URL and token are required in the environment.
|
||||
# Entity ids, thresholds, and queue behavior can be configured from the dashboard System tab and are stored in the database.
|
||||
HOME_ASSISTANT_URL=http://homeassistant.local:8123
|
||||
HOME_ASSISTANT_TOKEN=your_home_assistant_long_lived_token
|
||||
```
|
||||
|
||||
### Build and Run
|
||||
@@ -81,6 +102,10 @@ docker-compose up -d
|
||||
Features: user authentication, task CRUD, notifications
|
||||
```
|
||||
|
||||
If queueing is enabled from the dashboard System tab, Telegram prompts are queued durably and processed only when Home Assistant reports the configured battery and surplus thresholds. Operators can override the gate via `/queue/process` or by sending `process_now=true` to `/generate/text`.
|
||||
|
||||
The dashboard System tab stores Home Assistant entity ids, queue toggles, thresholds, and batch settings in the database, so the environment only needs `HOME_ASSISTANT_URL` and `HOME_ASSISTANT_TOKEN` for that integration.
|
||||
|
||||
2. **Monitor progress via Web UI:**
|
||||
|
||||
Open `http://yourserver:8000` to see real-time progress
|
||||
@@ -99,6 +124,39 @@ docker-compose up -d
|
||||
| `/status/{project_id}` | GET | Get project status |
|
||||
| `/projects` | GET | List all projects |
|
||||
|
||||
## LLM Guardrails and Tool Access
|
||||
|
||||
External LLM calls are now routed through a centralized client that applies:
|
||||
|
||||
- A global guardrail prompt for every outbound model request
|
||||
- Stage-specific guardrails for request interpretation and change summaries
|
||||
- Service-mediated tool outputs that expose tracked Gitea/project state without giving the model raw credentials
|
||||
|
||||
Current mediated tools include:
|
||||
|
||||
- `gitea_project_catalog`: active tracked projects and repository mappings
|
||||
- `gitea_project_state`: current repository, PR, and linked-issue state for the project in scope
|
||||
- `gitea_project_issues`: tracked open issues for the relevant repository
|
||||
- `gitea_pull_requests`: tracked pull requests for the relevant repository
|
||||
|
||||
The service also supports a bounded live tool-call loop for selected lookups. When enabled, the model may request one live call such as `gitea_lookup_issue` or `gitea_lookup_pull_request`, the service executes it against Gitea, and the final model response is generated from the returned result. This remains mediated by the service, so the model never receives raw credentials.
|
||||
|
||||
Live tool access is stage-aware. `LLM_LIVE_TOOL_ALLOWLIST` controls which live tools exist globally, while `LLM_LIVE_TOOL_STAGE_ALLOWLIST` controls which LLM stages may use them. If you need per-stage subsets, `LLM_LIVE_TOOL_STAGE_TOOL_MAP` accepts a JSON object mapping each stage to the exact tools it may use. For example, you can allow issue and PR lookups during `request_interpretation` while keeping `change_summary` fully read-only.
|
||||
|
||||
When the interpreter decides a prompt starts a new project, the service can run a dedicated `project_naming` LLM stage before generation. `LLM_PROJECT_NAMING_SYSTEM_PROMPT` and `LLM_PROJECT_NAMING_GUARDRAIL_PROMPT` let you steer how project titles and repository slugs are chosen. The interpreter now checks tracked project repositories plus live Gitea repository names when available, so if the model suggests a colliding repo slug the service will automatically move to the next available slug.
|
||||
|
||||
New project creation can also run a dedicated `project_id_naming` stage. `LLM_PROJECT_ID_SYSTEM_PROMPT` and `LLM_PROJECT_ID_GUARDRAIL_PROMPT` control how stable project ids are chosen, and the service will append deterministic numeric suffixes when an id is already taken instead of always falling back to a random UUID-based id.
|
||||
|
||||
Runtime visibility for the active guardrails, mediated tools, live tools, and model configuration is available at `/llm/runtime` and in the dashboard System tab.
|
||||
|
||||
Operational visibility for the Gitea integration, Home Assistant energy gate, and queued prompt counts is available in the dashboard Health tab, plus `/gitea/health`, `/home-assistant/health`, and `/queue`.
|
||||
|
||||
The dashboard Health tab also includes operator controls for manually processing queued Telegram prompts, force-processing them when needed, and retrying failed items.
|
||||
|
||||
Editable guardrail and system prompts are persisted in the database as overrides on top of the environment defaults. The current merged values are available at `/llm/prompts`, and the dashboard System tab can edit or reset them without restarting the service.
|
||||
|
||||
These tool payloads are appended to the model prompt as authoritative JSON generated by the service, so the LLM can reason over live project and Gitea context while remaining constrained by the configured guardrails.
|
||||
|
||||
## Development
|
||||
|
||||
### Makefile Targets
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.6.2
|
||||
0.9.10
|
||||
|
||||
125
ai_software_factory/agents/change_summary.py
Normal file
125
ai_software_factory/agents/change_summary.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""Generate concise chat-friendly summaries of software generation results."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
try:
|
||||
from ..config import settings
|
||||
from .llm_service import LLMServiceClient
|
||||
except ImportError:
|
||||
from config import settings
|
||||
from agents.llm_service import LLMServiceClient
|
||||
|
||||
|
||||
class ChangeSummaryGenerator:
|
||||
"""Create a readable overview of generated changes for chat responses."""
|
||||
|
||||
def __init__(self, ollama_url: str | None = None, model: str | None = None):
|
||||
self.ollama_url = (ollama_url or settings.ollama_url).rstrip('/')
|
||||
self.model = model or settings.OLLAMA_MODEL
|
||||
self.llm_client = LLMServiceClient(ollama_url=self.ollama_url, model=self.model)
|
||||
|
||||
async def summarize(self, context: dict) -> str:
|
||||
"""Summarize project changes with Ollama, or fall back to a deterministic overview."""
|
||||
summary, _trace = await self.summarize_with_trace(context)
|
||||
return summary
|
||||
|
||||
async def summarize_with_trace(self, context: dict) -> tuple[str, dict]:
|
||||
"""Summarize project changes with Ollama, or fall back to a deterministic overview."""
|
||||
prompt = self._prompt(context)
|
||||
system_prompt = (
|
||||
'You write concise but informative mobile chat summaries of software delivery work. '
|
||||
'Write 3 to 5 sentences. Mention the application goal, main delivered pieces, '
|
||||
'technical direction, and what the user should expect next. Avoid markdown bullets.'
|
||||
)
|
||||
content, trace = await self.llm_client.chat_with_trace(
|
||||
stage='change_summary',
|
||||
system_prompt=system_prompt,
|
||||
user_prompt=prompt,
|
||||
tool_context_input={
|
||||
'project_id': context.get('project_id'),
|
||||
'project_name': context.get('name'),
|
||||
'repository': context.get('repository'),
|
||||
'repository_url': context.get('repository_url'),
|
||||
'pull_request': context.get('pull_request'),
|
||||
'pull_request_url': context.get('pull_request_url'),
|
||||
'pull_request_state': context.get('pull_request_state'),
|
||||
'related_issue': context.get('related_issue'),
|
||||
'issues': [context.get('related_issue')] if context.get('related_issue') else [],
|
||||
},
|
||||
)
|
||||
if content:
|
||||
return content.strip(), trace
|
||||
|
||||
fallback = self._fallback(context)
|
||||
return fallback, {
|
||||
'stage': 'change_summary',
|
||||
'provider': 'fallback',
|
||||
'model': self.model,
|
||||
'system_prompt': system_prompt,
|
||||
'user_prompt': prompt,
|
||||
'assistant_response': fallback,
|
||||
'raw_response': {'fallback': 'deterministic', 'llm_trace': trace.get('raw_response') if isinstance(trace, dict) else None},
|
||||
'guardrails': trace.get('guardrails') if isinstance(trace, dict) else [],
|
||||
'tool_context': trace.get('tool_context') if isinstance(trace, dict) else [],
|
||||
'fallback_used': True,
|
||||
}
|
||||
|
||||
def _prompt(self, context: dict) -> str:
|
||||
features = ', '.join(context.get('features') or []) or 'No explicit features recorded'
|
||||
tech_stack = ', '.join(context.get('tech_stack') or []) or 'No explicit tech stack recorded'
|
||||
changed_files = ', '.join(context.get('changed_files') or []) or 'No files recorded'
|
||||
logs = ' | '.join((context.get('logs') or [])[:4]) or 'No log excerpts'
|
||||
return (
|
||||
f"Project name: {context.get('name', 'Unknown project')}\n"
|
||||
f"Description: {context.get('description', '')}\n"
|
||||
f"Features: {features}\n"
|
||||
f"Tech stack: {tech_stack}\n"
|
||||
f"Changed files: {changed_files}\n"
|
||||
f"Repository: {context.get('repository_url') or 'No repository URL'}\n"
|
||||
f"Pull request: {context.get('pull_request_url') or 'No pull request URL'}\n"
|
||||
f"Pull request state: {context.get('pull_request_state') or 'No pull request state'}\n"
|
||||
f"Status message: {context.get('message') or ''}\n"
|
||||
f"Log excerpts: {logs}\n"
|
||||
"Write a broad but phone-friendly summary of what was done."
|
||||
)
|
||||
|
||||
def _fallback(self, context: dict) -> str:
|
||||
name = context.get('name', 'The project')
|
||||
description = context.get('description') or 'a software request'
|
||||
changed_files = context.get('changed_files') or []
|
||||
features = context.get('features') or []
|
||||
tech_stack = context.get('tech_stack') or []
|
||||
repo_url = context.get('repository_url')
|
||||
repo_status = context.get('repository_status')
|
||||
pr_url = context.get('pull_request_url')
|
||||
pr_state = context.get('pull_request_state')
|
||||
|
||||
first_sentence = f"{name} was generated from your request for {description}."
|
||||
feature_sentence = (
|
||||
f"The delivery focused on {', '.join(features[:3])}."
|
||||
if features else
|
||||
"The delivery focused on turning the request into an initial runnable application skeleton."
|
||||
)
|
||||
tech_sentence = (
|
||||
f"The generated implementation currently targets {', '.join(tech_stack[:3])}."
|
||||
if tech_stack else
|
||||
"The implementation was created with the current default stack configured for the factory."
|
||||
)
|
||||
file_sentence = (
|
||||
f"Key artifacts were updated across {len(changed_files)} files, including {', '.join(changed_files[:3])}."
|
||||
if changed_files else
|
||||
"The service completed the generation flow, but no changed file list was returned."
|
||||
)
|
||||
if repo_url:
|
||||
repo_sentence = f"The resulting project is tracked at {repo_url}."
|
||||
elif repo_status in {'pending', 'skipped', 'error'}:
|
||||
repo_sentence = "Repository provisioning was not confirmed, so review the Gitea status in the dashboard before assuming a remote repo exists."
|
||||
else:
|
||||
repo_sentence = "The project is ready for further review in the dashboard."
|
||||
if pr_url and pr_state == 'open':
|
||||
pr_sentence = f"An open pull request is ready for review at {pr_url}, and later prompts will continue updating that same PR until it is merged."
|
||||
elif pr_url:
|
||||
pr_sentence = f"The latest pull request is available at {pr_url}."
|
||||
else:
|
||||
pr_sentence = "No pull request link was recorded for this delivery."
|
||||
return ' '.join([first_sentence, feature_sentence, tech_sentence, file_sentence, repo_sentence, pr_sentence])
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,9 @@
|
||||
"""Git manager for project operations."""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
@@ -14,53 +16,140 @@ except ImportError:
|
||||
class GitManager:
|
||||
"""Manages git operations for the project."""
|
||||
|
||||
def __init__(self, project_id: str):
|
||||
def __init__(self, project_id: str, project_dir: str | None = None):
|
||||
if not project_id:
|
||||
raise ValueError("project_id cannot be empty or None")
|
||||
self.project_id = project_id
|
||||
project_path = Path(project_id)
|
||||
if project_path.is_absolute() or len(project_path.parts) > 1:
|
||||
resolved = project_path.expanduser().resolve()
|
||||
if project_dir:
|
||||
resolved = Path(project_dir).expanduser().resolve()
|
||||
else:
|
||||
base_root = settings.projects_root
|
||||
if base_root.name != "test-project":
|
||||
base_root = base_root / "test-project"
|
||||
resolved = (base_root / project_id).resolve()
|
||||
project_path = Path(project_id)
|
||||
if project_path.is_absolute() or len(project_path.parts) > 1:
|
||||
resolved = project_path.expanduser().resolve()
|
||||
else:
|
||||
base_root = settings.projects_root
|
||||
if base_root.name != "test-project":
|
||||
base_root = base_root / "test-project"
|
||||
resolved = (base_root / project_id).resolve()
|
||||
self.project_dir = str(resolved)
|
||||
|
||||
def is_git_available(self) -> bool:
|
||||
"""Return whether the git executable is available in the current environment."""
|
||||
return shutil.which('git') is not None
|
||||
|
||||
def _ensure_git_available(self) -> None:
|
||||
"""Raise a clear error when git is not installed in the runtime environment."""
|
||||
if not self.is_git_available():
|
||||
raise RuntimeError('git executable is not available in PATH')
|
||||
|
||||
def _run(self, args: list[str], env: dict | None = None, check: bool = True) -> subprocess.CompletedProcess:
|
||||
"""Run a git command in the project directory."""
|
||||
self._ensure_git_available()
|
||||
return subprocess.run(
|
||||
args,
|
||||
check=check,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=self.project_dir,
|
||||
env=env,
|
||||
)
|
||||
|
||||
def has_repo(self) -> bool:
|
||||
"""Return whether the project directory already contains a git repository."""
|
||||
return Path(self.project_dir, '.git').exists()
|
||||
|
||||
def init_repo(self):
|
||||
"""Initialize git repository."""
|
||||
os.makedirs(self.project_dir, exist_ok=True)
|
||||
os.chdir(self.project_dir)
|
||||
subprocess.run(["git", "init"], check=True, capture_output=True)
|
||||
self._run(["git", "init", "-b", "main"])
|
||||
self._run(["git", "config", "user.name", "AI Software Factory"])
|
||||
self._run(["git", "config", "user.email", "factory@local.invalid"])
|
||||
|
||||
def add_files(self, paths: list[str]):
|
||||
"""Add files to git staging."""
|
||||
subprocess.run(["git", "add"] + paths, check=True, capture_output=True)
|
||||
self._run(["git", "add"] + paths)
|
||||
|
||||
def checkout_branch(self, branch_name: str, create: bool = False, start_point: str | None = None) -> None:
|
||||
"""Switch to a branch, optionally creating it from a start point."""
|
||||
if create:
|
||||
args = ["git", "checkout", "-B", branch_name]
|
||||
if start_point:
|
||||
args.append(start_point)
|
||||
self._run(args)
|
||||
return
|
||||
self._run(["git", "checkout", branch_name])
|
||||
|
||||
def branch_exists(self, branch_name: str) -> bool:
|
||||
"""Return whether a local branch exists."""
|
||||
result = self._run(["git", "show-ref", "--verify", f"refs/heads/{branch_name}"], check=False)
|
||||
return result.returncode == 0
|
||||
|
||||
def commit(self, message: str):
|
||||
def commit(self, message: str) -> str:
|
||||
"""Create a git commit."""
|
||||
subprocess.run(
|
||||
["git", "commit", "-m", message],
|
||||
check=True,
|
||||
capture_output=True
|
||||
)
|
||||
self._run(["git", "commit", "-m", message])
|
||||
return self.current_head()
|
||||
|
||||
def create_empty_commit(self, message: str) -> str:
|
||||
"""Create an empty commit."""
|
||||
self._run(["git", "commit", "--allow-empty", "-m", message])
|
||||
return self.current_head()
|
||||
|
||||
def push(self, remote: str = "origin", branch: str = "main"):
|
||||
"""Push changes to remote."""
|
||||
subprocess.run(
|
||||
["git", "push", "-u", remote, branch],
|
||||
check=True,
|
||||
capture_output=True
|
||||
)
|
||||
self._run(["git", "push", "-u", remote, branch])
|
||||
|
||||
def ensure_remote(self, remote: str, url: str) -> None:
|
||||
"""Create or update a remote URL."""
|
||||
result = self._run(["git", "remote", "get-url", remote], check=False)
|
||||
if result.returncode == 0:
|
||||
self._run(["git", "remote", "set-url", remote, url])
|
||||
else:
|
||||
self._run(["git", "remote", "add", remote, url])
|
||||
|
||||
def push_with_credentials(
|
||||
self,
|
||||
remote_url: str,
|
||||
username: str,
|
||||
password: str,
|
||||
remote: str = "origin",
|
||||
branch: str = "main",
|
||||
) -> None:
|
||||
"""Push to a remote over HTTPS using an askpass helper."""
|
||||
os.makedirs(self.project_dir, exist_ok=True)
|
||||
self.ensure_remote(remote, remote_url)
|
||||
helper_contents = "#!/bin/sh\ncase \"$1\" in\n *Username*) printf '%s\\n' \"$GIT_ASKPASS_USERNAME\" ;;\n *) printf '%s\\n' \"$GIT_ASKPASS_PASSWORD\" ;;\nesac\n"
|
||||
helper_path: str | None = None
|
||||
try:
|
||||
with tempfile.NamedTemporaryFile('w', delete=False, dir=self.project_dir, prefix='git-askpass-', suffix='.sh') as helper_file:
|
||||
helper_file.write(helper_contents)
|
||||
helper_path = helper_file.name
|
||||
os.chmod(helper_path, 0o700)
|
||||
env = os.environ.copy()
|
||||
env.update(
|
||||
{
|
||||
"GIT_TERMINAL_PROMPT": "0",
|
||||
"GIT_ASKPASS": helper_path,
|
||||
"GIT_ASKPASS_USERNAME": username,
|
||||
"GIT_ASKPASS_PASSWORD": password,
|
||||
}
|
||||
)
|
||||
self._run(["git", "push", "-u", remote, branch], env=env)
|
||||
finally:
|
||||
if helper_path:
|
||||
Path(helper_path).unlink(missing_ok=True)
|
||||
|
||||
def create_branch(self, branch_name: str):
|
||||
"""Create and switch to a new branch."""
|
||||
subprocess.run(
|
||||
["git", "checkout", "-b", branch_name],
|
||||
check=True,
|
||||
capture_output=True
|
||||
)
|
||||
self._run(["git", "checkout", "-b", branch_name])
|
||||
|
||||
def revert_commit(self, commit_hash: str, no_edit: bool = True) -> str:
|
||||
"""Revert a commit and return the new HEAD."""
|
||||
args = ["git", "revert"]
|
||||
if no_edit:
|
||||
args.append("--no-edit")
|
||||
args.append(commit_hash)
|
||||
self._run(args)
|
||||
return self.current_head()
|
||||
|
||||
def create_pr(
|
||||
self,
|
||||
@@ -84,6 +173,18 @@ class GitManager:
|
||||
result = subprocess.run(
|
||||
["git", "status", "--porcelain"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
text=True,
|
||||
cwd=self.project_dir,
|
||||
)
|
||||
return result.stdout.strip()
|
||||
|
||||
def current_head(self) -> str:
|
||||
"""Return the current commit hash."""
|
||||
return self._run(["git", "rev-parse", "HEAD"]).stdout.strip()
|
||||
|
||||
def current_head_or_none(self) -> str | None:
|
||||
"""Return the current commit hash when the repository already has commits."""
|
||||
result = self._run(["git", "rev-parse", "HEAD"], check=False)
|
||||
if result.returncode != 0:
|
||||
return None
|
||||
return result.stdout.strip() or None
|
||||
|
||||
@@ -1,6 +1,23 @@
|
||||
"""Gitea API integration for repository and pull request operations."""
|
||||
|
||||
import os
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
import json
|
||||
from urllib.parse import urlparse
|
||||
|
||||
|
||||
def _normalize_base_url(base_url: str) -> str:
|
||||
"""Normalize host-only service addresses into valid absolute URLs."""
|
||||
normalized = (base_url or '').strip().rstrip('/')
|
||||
if not normalized:
|
||||
return ''
|
||||
if '://' not in normalized:
|
||||
normalized = f'https://{normalized}'
|
||||
parsed = urlparse(normalized)
|
||||
if not parsed.scheme or not parsed.netloc:
|
||||
return ''
|
||||
return normalized
|
||||
|
||||
|
||||
class GiteaAPI:
|
||||
@@ -8,7 +25,7 @@ class GiteaAPI:
|
||||
|
||||
def __init__(self, token: str, base_url: str, owner: str | None = None, repo: str | None = None):
|
||||
self.token = token
|
||||
self.base_url = base_url.rstrip("/")
|
||||
self.base_url = _normalize_base_url(base_url)
|
||||
self.owner = owner
|
||||
self.repo = repo
|
||||
self.headers = {
|
||||
@@ -23,7 +40,7 @@ class GiteaAPI:
|
||||
owner = os.getenv("GITEA_OWNER", "ai-test")
|
||||
repo = os.getenv("GITEA_REPO", "")
|
||||
return {
|
||||
"base_url": base_url.rstrip("/"),
|
||||
"base_url": _normalize_base_url(base_url),
|
||||
"token": token,
|
||||
"owner": owner,
|
||||
"repo": repo,
|
||||
@@ -41,6 +58,50 @@ class GiteaAPI:
|
||||
"""Build a Gitea API URL from a relative path."""
|
||||
return f"{self.base_url}/api/v1/{path.lstrip('/')}"
|
||||
|
||||
def _normalize_pull_request_head(self, head: str | None, owner: str | None = None) -> str | None:
|
||||
"""Return a Gitea-compatible head ref for pull request creation."""
|
||||
normalized = (head or '').strip()
|
||||
if not normalized:
|
||||
return None
|
||||
if ':' in normalized:
|
||||
return normalized
|
||||
effective_owner = (owner or self.owner or '').strip()
|
||||
if not effective_owner:
|
||||
return normalized
|
||||
return f"{effective_owner}:{normalized}"
|
||||
|
||||
def build_repo_git_url(self, owner: str | None = None, repo: str | None = None) -> str | None:
|
||||
"""Build the clone URL for a repository."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo:
|
||||
return None
|
||||
return f"{self.base_url}/{_owner}/{_repo}.git"
|
||||
|
||||
def build_commit_url(self, commit_hash: str, owner: str | None = None, repo: str | None = None) -> str | None:
|
||||
"""Build a browser URL for a commit."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo or not commit_hash:
|
||||
return None
|
||||
return f"{self.base_url}/{_owner}/{_repo}/commit/{commit_hash}"
|
||||
|
||||
def build_compare_url(self, base_ref: str, head_ref: str, owner: str | None = None, repo: str | None = None) -> str | None:
|
||||
"""Build a browser URL for a compare view."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo or not base_ref or not head_ref:
|
||||
return None
|
||||
return f"{self.base_url}/{_owner}/{_repo}/compare/{base_ref}...{head_ref}"
|
||||
|
||||
def build_pull_request_url(self, pr_number: int, owner: str | None = None, repo: str | None = None) -> str | None:
|
||||
"""Build a browser URL for a pull request."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo or not pr_number:
|
||||
return None
|
||||
return f"{self.base_url}/{_owner}/{_repo}/pulls/{pr_number}"
|
||||
|
||||
async def _request(self, method: str, path: str, payload: dict | None = None) -> dict:
|
||||
"""Perform a Gitea API request and normalize the response."""
|
||||
try:
|
||||
@@ -59,6 +120,30 @@ class GiteaAPI:
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
def _request_sync(self, method: str, path: str, payload: dict | None = None) -> dict:
|
||||
"""Perform a synchronous Gitea API request."""
|
||||
try:
|
||||
if not self.base_url:
|
||||
return {'error': 'Gitea base URL is not configured or is invalid'}
|
||||
request = urllib.request.Request(
|
||||
self._api_url(path),
|
||||
headers=self.get_auth_headers(),
|
||||
method=method.upper(),
|
||||
)
|
||||
if payload is not None:
|
||||
request.data = json.dumps(payload).encode('utf-8')
|
||||
with urllib.request.urlopen(request) as response:
|
||||
body = response.read().decode('utf-8')
|
||||
return json.loads(body) if body else {}
|
||||
except urllib.error.HTTPError as exc:
|
||||
try:
|
||||
body = exc.read().decode('utf-8')
|
||||
except Exception:
|
||||
body = str(exc)
|
||||
return {'error': body, 'status_code': exc.code}
|
||||
except Exception as exc:
|
||||
return {'error': str(exc)}
|
||||
|
||||
def build_project_repo_name(self, project_id: str, project_name: str | None = None) -> str:
|
||||
"""Build a repository name for a generated project."""
|
||||
preferred = (project_name or project_id or "project").strip().lower().replace(" ", "-")
|
||||
@@ -97,6 +182,36 @@ class GiteaAPI:
|
||||
result.setdefault("status", "created")
|
||||
return result
|
||||
|
||||
async def delete_repo(self, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Delete a repository from the configured organization/user."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo:
|
||||
return {'error': 'Owner and repository name are required'}
|
||||
result = await self._request('DELETE', f'repos/{_owner}/{_repo}')
|
||||
if not result.get('error'):
|
||||
result.setdefault('status', 'deleted')
|
||||
return result
|
||||
|
||||
def delete_repo_sync(self, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Synchronously delete a repository from the configured organization/user."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
if not _owner or not _repo:
|
||||
return {'error': 'Owner and repository name are required'}
|
||||
result = self._request_sync('DELETE', f'repos/{_owner}/{_repo}')
|
||||
if not result.get('error'):
|
||||
result.setdefault('status', 'deleted')
|
||||
return result
|
||||
|
||||
async def get_current_user(self) -> dict:
|
||||
"""Get the user associated with the configured token."""
|
||||
return await self._request("GET", "user")
|
||||
|
||||
def get_current_user_sync(self) -> dict:
|
||||
"""Synchronously get the user associated with the configured token."""
|
||||
return self._request_sync("GET", "user")
|
||||
|
||||
async def create_branch(self, branch: str, base: str = "main", owner: str | None = None, repo: str | None = None):
|
||||
"""Create a new branch."""
|
||||
_owner = owner or self.owner
|
||||
@@ -119,14 +234,174 @@ class GiteaAPI:
|
||||
"""Create a pull request."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
normalized_head = self._normalize_pull_request_head(head, _owner)
|
||||
payload = {
|
||||
"title": title,
|
||||
"body": body,
|
||||
"base": base,
|
||||
"head": head or f"{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
"head": normalized_head or f"{_owner}:{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
}
|
||||
return await self._request("POST", f"repos/{_owner}/{_repo}/pulls", payload)
|
||||
|
||||
def create_pull_request_sync(
|
||||
self,
|
||||
title: str,
|
||||
body: str,
|
||||
owner: str,
|
||||
repo: str,
|
||||
base: str = "main",
|
||||
head: str | None = None,
|
||||
) -> dict:
|
||||
"""Synchronously create a pull request."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
normalized_head = self._normalize_pull_request_head(head, _owner)
|
||||
payload = {
|
||||
"title": title,
|
||||
"body": body,
|
||||
"base": base,
|
||||
"head": normalized_head or f"{_owner}:{_owner}-{_repo}-ai-gen-{hash(title) % 10000}",
|
||||
}
|
||||
return self._request_sync("POST", f"repos/{_owner}/{_repo}/pulls", payload)
|
||||
|
||||
async def list_pull_requests(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
state: str = 'open',
|
||||
) -> dict | list:
|
||||
"""List pull requests for a repository."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/pulls?state={state}")
|
||||
|
||||
def list_pull_requests_sync(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
state: str = 'open',
|
||||
) -> dict | list:
|
||||
"""Synchronously list pull requests for a repository."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/pulls?state={state}")
|
||||
|
||||
async def list_repositories(self, owner: str | None = None) -> dict | list:
|
||||
"""List repositories within the configured organization."""
|
||||
_owner = owner or self.owner
|
||||
return await self._request("GET", f"orgs/{_owner}/repos")
|
||||
|
||||
def list_repositories_sync(self, owner: str | None = None) -> dict | list:
|
||||
"""Synchronously list repositories within the configured organization."""
|
||||
_owner = owner or self.owner
|
||||
return self._request_sync("GET", f"orgs/{_owner}/repos")
|
||||
|
||||
async def list_branches(self, owner: str | None = None, repo: str | None = None) -> dict | list:
|
||||
"""List repository branches."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/branches")
|
||||
|
||||
def list_branches_sync(self, owner: str | None = None, repo: str | None = None) -> dict | list:
|
||||
"""Synchronously list repository branches."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/branches")
|
||||
|
||||
async def list_issues(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
state: str = 'open',
|
||||
) -> dict | list:
|
||||
"""List repository issues, excluding pull requests at the consumer layer."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/issues?state={state}")
|
||||
|
||||
def list_issues_sync(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
state: str = 'open',
|
||||
) -> dict | list:
|
||||
"""Synchronously list repository issues."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/issues?state={state}")
|
||||
|
||||
async def get_issue(self, issue_number: int, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Return one repository issue by number."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/issues/{issue_number}")
|
||||
|
||||
def get_issue_sync(self, issue_number: int, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Synchronously return one repository issue by number."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/issues/{issue_number}")
|
||||
|
||||
async def list_repo_commits(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
limit: int = 25,
|
||||
branch: str | None = None,
|
||||
) -> dict | list:
|
||||
"""List recent commits for a repository."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
branch_query = f"&sha={branch}" if branch else ""
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/commits?limit={limit}{branch_query}")
|
||||
|
||||
def list_repo_commits_sync(
|
||||
self,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
limit: int = 25,
|
||||
branch: str | None = None,
|
||||
) -> dict | list:
|
||||
"""Synchronously list recent commits for a repository."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
branch_query = f"&sha={branch}" if branch else ""
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/commits?limit={limit}{branch_query}")
|
||||
|
||||
async def get_commit(
|
||||
self,
|
||||
commit_hash: str,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
) -> dict:
|
||||
"""Return one commit by hash."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/git/commits/{commit_hash}")
|
||||
|
||||
def get_commit_sync(
|
||||
self,
|
||||
commit_hash: str,
|
||||
owner: str | None = None,
|
||||
repo: str | None = None,
|
||||
) -> dict:
|
||||
"""Synchronously return one commit by hash."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/git/commits/{commit_hash}")
|
||||
|
||||
async def get_pull_request(self, pr_number: int, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Return one pull request by number."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}/pulls/{pr_number}")
|
||||
|
||||
def get_pull_request_sync(self, pr_number: int, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Synchronously return one pull request by number."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}/pulls/{pr_number}")
|
||||
|
||||
async def push_commit(
|
||||
self,
|
||||
branch: str,
|
||||
@@ -160,4 +435,14 @@ class GiteaAPI:
|
||||
if not _repo:
|
||||
return {"error": "Repository name required for org operations"}
|
||||
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}")
|
||||
return await self._request("GET", f"repos/{_owner}/{_repo}")
|
||||
|
||||
def get_repo_info_sync(self, owner: str | None = None, repo: str | None = None) -> dict:
|
||||
"""Synchronously get repository information."""
|
||||
_owner = owner or self.owner
|
||||
_repo = repo or self.repo
|
||||
|
||||
if not _repo:
|
||||
return {"error": "Repository name required for org operations"}
|
||||
|
||||
return self._request_sync("GET", f"repos/{_owner}/{_repo}")
|
||||
162
ai_software_factory/agents/home_assistant.py
Normal file
162
ai_software_factory/agents/home_assistant.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Home Assistant integration for energy-gated queue processing."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
try:
|
||||
from ..config import settings
|
||||
except ImportError:
|
||||
from config import settings
|
||||
|
||||
|
||||
class HomeAssistantAgent:
|
||||
"""Query Home Assistant for queue-processing eligibility and health."""
|
||||
|
||||
def __init__(self, base_url: str | None = None, token: str | None = None):
|
||||
self.base_url = (base_url or settings.home_assistant_url).rstrip('/')
|
||||
self.token = token or settings.home_assistant_token
|
||||
|
||||
def _headers(self) -> dict[str, str]:
|
||||
return {
|
||||
'Authorization': f'Bearer {self.token}',
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
def _state_url(self, entity_id: str) -> str:
|
||||
return f'{self.base_url}/api/states/{entity_id}'
|
||||
|
||||
async def _get_state(self, entity_id: str) -> dict:
|
||||
if not self.base_url:
|
||||
return {'error': 'Home Assistant URL is not configured'}
|
||||
if not self.token:
|
||||
return {'error': 'Home Assistant token is not configured'}
|
||||
if not entity_id:
|
||||
return {'error': 'Home Assistant entity id is not configured'}
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(self._state_url(entity_id), headers=self._headers()) as resp:
|
||||
payload = await resp.json(content_type=None)
|
||||
if 200 <= resp.status < 300:
|
||||
return payload if isinstance(payload, dict) else {'value': payload}
|
||||
return {'error': payload, 'status_code': resp.status}
|
||||
except Exception as exc:
|
||||
return {'error': str(exc)}
|
||||
|
||||
def _get_state_sync(self, entity_id: str) -> dict:
|
||||
if not self.base_url:
|
||||
return {'error': 'Home Assistant URL is not configured'}
|
||||
if not self.token:
|
||||
return {'error': 'Home Assistant token is not configured'}
|
||||
if not entity_id:
|
||||
return {'error': 'Home Assistant entity id is not configured'}
|
||||
try:
|
||||
import json
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
|
||||
request = urllib.request.Request(self._state_url(entity_id), headers=self._headers(), method='GET')
|
||||
with urllib.request.urlopen(request) as response:
|
||||
body = response.read().decode('utf-8')
|
||||
return json.loads(body) if body else {}
|
||||
except urllib.error.HTTPError as exc:
|
||||
try:
|
||||
body = exc.read().decode('utf-8')
|
||||
except Exception:
|
||||
body = str(exc)
|
||||
return {'error': body, 'status_code': exc.code}
|
||||
except Exception as exc:
|
||||
return {'error': str(exc)}
|
||||
|
||||
@staticmethod
|
||||
def _coerce_float(payload: dict) -> float | None:
|
||||
raw = payload.get('state') if isinstance(payload, dict) else None
|
||||
try:
|
||||
return float(raw)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
async def queue_gate_status(self, force: bool = False) -> dict:
|
||||
"""Return whether queued prompts may be processed now."""
|
||||
if force or settings.prompt_queue_force_process:
|
||||
return {
|
||||
'status': 'success',
|
||||
'allowed': True,
|
||||
'forced': True,
|
||||
'reason': 'Queue override is enabled',
|
||||
}
|
||||
battery = await self._get_state(settings.home_assistant_battery_entity_id)
|
||||
surplus = await self._get_state(settings.home_assistant_surplus_entity_id)
|
||||
battery_value = self._coerce_float(battery)
|
||||
surplus_value = self._coerce_float(surplus)
|
||||
checks = []
|
||||
if battery.get('error'):
|
||||
checks.append({'name': 'battery', 'ok': False, 'message': str(battery.get('error')), 'entity_id': settings.home_assistant_battery_entity_id})
|
||||
else:
|
||||
checks.append({'name': 'battery', 'ok': battery_value is not None and battery_value >= settings.home_assistant_battery_full_threshold, 'message': f'{battery_value}%', 'entity_id': settings.home_assistant_battery_entity_id})
|
||||
if surplus.get('error'):
|
||||
checks.append({'name': 'surplus', 'ok': False, 'message': str(surplus.get('error')), 'entity_id': settings.home_assistant_surplus_entity_id})
|
||||
else:
|
||||
checks.append({'name': 'surplus', 'ok': surplus_value is not None and surplus_value >= settings.home_assistant_surplus_threshold_watts, 'message': f'{surplus_value} W', 'entity_id': settings.home_assistant_surplus_entity_id})
|
||||
allowed = all(check['ok'] for check in checks)
|
||||
return {
|
||||
'status': 'success' if allowed else 'blocked',
|
||||
'allowed': allowed,
|
||||
'forced': False,
|
||||
'checks': checks,
|
||||
'battery_level': battery_value,
|
||||
'surplus_watts': surplus_value,
|
||||
'thresholds': {
|
||||
'battery_full_percent': settings.home_assistant_battery_full_threshold,
|
||||
'surplus_watts': settings.home_assistant_surplus_threshold_watts,
|
||||
},
|
||||
'reason': 'Energy gate open' if allowed else 'Battery or surplus threshold not met',
|
||||
}
|
||||
|
||||
def health_check_sync(self) -> dict:
|
||||
"""Return current Home Assistant connectivity and queue gate diagnostics."""
|
||||
if not self.base_url:
|
||||
return {
|
||||
'status': 'error',
|
||||
'message': 'Home Assistant URL is not configured.',
|
||||
'base_url': '',
|
||||
'configured': False,
|
||||
'checks': [],
|
||||
}
|
||||
if not self.token:
|
||||
return {
|
||||
'status': 'error',
|
||||
'message': 'Home Assistant token is not configured.',
|
||||
'base_url': self.base_url,
|
||||
'configured': False,
|
||||
'checks': [],
|
||||
}
|
||||
battery = self._get_state_sync(settings.home_assistant_battery_entity_id)
|
||||
surplus = self._get_state_sync(settings.home_assistant_surplus_entity_id)
|
||||
checks = []
|
||||
for name, entity_id, payload in (
|
||||
('battery', settings.home_assistant_battery_entity_id, battery),
|
||||
('surplus', settings.home_assistant_surplus_entity_id, surplus),
|
||||
):
|
||||
checks.append(
|
||||
{
|
||||
'name': name,
|
||||
'entity_id': entity_id,
|
||||
'ok': not bool(payload.get('error')),
|
||||
'message': str(payload.get('error') or payload.get('state') or 'ok'),
|
||||
'status_code': payload.get('status_code'),
|
||||
'url': self._state_url(entity_id) if entity_id else self.base_url,
|
||||
}
|
||||
)
|
||||
return {
|
||||
'status': 'success' if all(check['ok'] for check in checks) else 'error',
|
||||
'message': 'Home Assistant connectivity is healthy.' if all(check['ok'] for check in checks) else 'Home Assistant checks failed.',
|
||||
'base_url': self.base_url,
|
||||
'configured': True,
|
||||
'checks': checks,
|
||||
'queue_gate': {
|
||||
'battery_full_percent': settings.home_assistant_battery_full_threshold,
|
||||
'surplus_watts': settings.home_assistant_surplus_threshold_watts,
|
||||
'force_process': settings.prompt_queue_force_process,
|
||||
},
|
||||
}
|
||||
394
ai_software_factory/agents/llm_service.py
Normal file
394
ai_software_factory/agents/llm_service.py
Normal file
@@ -0,0 +1,394 @@
|
||||
"""Centralized LLM client with guardrails and mediated tool context."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
from .gitea import GiteaAPI
|
||||
except ImportError:
|
||||
from gitea import GiteaAPI
|
||||
|
||||
try:
|
||||
from ..config import settings
|
||||
except ImportError:
|
||||
from config import settings
|
||||
|
||||
|
||||
class LLMToolbox:
|
||||
"""Build named tool payloads that can be shared with external LLM providers."""
|
||||
|
||||
SUPPORTED_LIVE_TOOL_STAGES = ('request_interpretation', 'change_summary', 'generation_plan', 'project_naming', 'project_id_naming')
|
||||
|
||||
def build_tool_context(self, stage: str, context: dict | None = None) -> list[dict]:
|
||||
"""Return the mediated tool payloads allowed for this LLM request."""
|
||||
context = context or {}
|
||||
allowed = set(settings.llm_tool_allowlist)
|
||||
limit = settings.llm_tool_context_limit
|
||||
tool_context: list[dict] = []
|
||||
|
||||
if 'gitea_project_catalog' in allowed:
|
||||
projects = context.get('projects') or []
|
||||
if projects:
|
||||
tool_context.append(
|
||||
{
|
||||
'name': 'gitea_project_catalog',
|
||||
'description': 'Tracked active projects and their repository mappings inside the factory.',
|
||||
'payload': projects[:limit],
|
||||
}
|
||||
)
|
||||
|
||||
if 'gitea_project_state' in allowed:
|
||||
state_payload = {
|
||||
'project_id': context.get('project_id'),
|
||||
'project_name': context.get('project_name') or context.get('name'),
|
||||
'repository': context.get('repository'),
|
||||
'repository_url': context.get('repository_url'),
|
||||
'pull_request': context.get('pull_request'),
|
||||
'pull_request_url': context.get('pull_request_url'),
|
||||
'pull_request_state': context.get('pull_request_state'),
|
||||
'related_issue': context.get('related_issue'),
|
||||
}
|
||||
if any(value for value in state_payload.values()):
|
||||
tool_context.append(
|
||||
{
|
||||
'name': 'gitea_project_state',
|
||||
'description': 'Current repository and pull-request state for the project being discussed.',
|
||||
'payload': state_payload,
|
||||
}
|
||||
)
|
||||
|
||||
if 'gitea_project_issues' in allowed:
|
||||
issues = context.get('open_issues') or context.get('issues') or []
|
||||
if issues:
|
||||
tool_context.append(
|
||||
{
|
||||
'name': 'gitea_project_issues',
|
||||
'description': 'Open tracked Gitea issues for the relevant project repository.',
|
||||
'payload': issues[:limit],
|
||||
}
|
||||
)
|
||||
|
||||
if 'gitea_pull_requests' in allowed:
|
||||
pull_requests = context.get('pull_requests') or []
|
||||
if pull_requests:
|
||||
tool_context.append(
|
||||
{
|
||||
'name': 'gitea_pull_requests',
|
||||
'description': 'Tracked pull requests associated with the relevant project repository.',
|
||||
'payload': pull_requests[:limit],
|
||||
}
|
||||
)
|
||||
|
||||
return tool_context
|
||||
|
||||
def build_live_tool_specs(self, stage: str, context: dict | None = None) -> list[dict]:
|
||||
"""Return live tool-call specs that the model may request explicitly."""
|
||||
_context = context or {}
|
||||
specs = []
|
||||
allowed = set(settings.llm_live_tools_for_stage(stage))
|
||||
if 'gitea_lookup_issue' in allowed:
|
||||
specs.append(
|
||||
{
|
||||
'name': 'gitea_lookup_issue',
|
||||
'description': 'Fetch one live Gitea issue by issue number for a tracked repository.',
|
||||
'arguments': {
|
||||
'project_id': 'optional tracked project id',
|
||||
'owner': 'optional repository owner override',
|
||||
'repo': 'optional repository name override',
|
||||
'issue_number': 'required integer issue number',
|
||||
},
|
||||
}
|
||||
)
|
||||
if 'gitea_lookup_pull_request' in allowed:
|
||||
specs.append(
|
||||
{
|
||||
'name': 'gitea_lookup_pull_request',
|
||||
'description': 'Fetch one live Gitea pull request by PR number for a tracked repository.',
|
||||
'arguments': {
|
||||
'project_id': 'optional tracked project id',
|
||||
'owner': 'optional repository owner override',
|
||||
'repo': 'optional repository name override',
|
||||
'pr_number': 'required integer pull request number',
|
||||
},
|
||||
}
|
||||
)
|
||||
return specs
|
||||
|
||||
|
||||
class LLMLiveToolExecutor:
|
||||
"""Resolve bounded live tool requests on behalf of the model."""
|
||||
|
||||
def __init__(self):
|
||||
self.gitea_api = None
|
||||
if settings.gitea_url and settings.gitea_token:
|
||||
self.gitea_api = GiteaAPI(
|
||||
token=settings.GITEA_TOKEN,
|
||||
base_url=settings.GITEA_URL,
|
||||
owner=settings.GITEA_OWNER,
|
||||
repo=settings.GITEA_REPO or '',
|
||||
)
|
||||
|
||||
async def execute(self, tool_name: str, arguments: dict, context: dict | None = None) -> dict:
|
||||
"""Execute one live tool request and normalize the result."""
|
||||
if tool_name not in set(settings.llm_live_tool_allowlist):
|
||||
return {'error': f'Tool {tool_name} is not enabled'}
|
||||
if self.gitea_api is None:
|
||||
return {'error': 'Gitea live tool execution is not configured'}
|
||||
resolved = self._resolve_repository(arguments=arguments, context=context or {})
|
||||
if resolved.get('error'):
|
||||
return resolved
|
||||
owner = resolved['owner']
|
||||
repo = resolved['repo']
|
||||
|
||||
if tool_name == 'gitea_lookup_issue':
|
||||
issue_number = arguments.get('issue_number')
|
||||
if issue_number is None:
|
||||
return {'error': 'issue_number is required'}
|
||||
return await self.gitea_api.get_issue(issue_number=int(issue_number), owner=owner, repo=repo)
|
||||
|
||||
if tool_name == 'gitea_lookup_pull_request':
|
||||
pr_number = arguments.get('pr_number')
|
||||
if pr_number is None:
|
||||
return {'error': 'pr_number is required'}
|
||||
return await self.gitea_api.get_pull_request(pr_number=int(pr_number), owner=owner, repo=repo)
|
||||
|
||||
return {'error': f'Unsupported tool {tool_name}'}
|
||||
|
||||
def _resolve_repository(self, arguments: dict, context: dict) -> dict:
|
||||
"""Resolve repository owner/name from explicit args or tracked project context."""
|
||||
owner = arguments.get('owner')
|
||||
repo = arguments.get('repo')
|
||||
if owner and repo:
|
||||
return {'owner': owner, 'repo': repo}
|
||||
project_id = arguments.get('project_id')
|
||||
if project_id:
|
||||
for project in context.get('projects', []):
|
||||
if project.get('project_id') == project_id:
|
||||
repository = project.get('repository') or {}
|
||||
if repository.get('owner') and repository.get('name'):
|
||||
return {'owner': repository['owner'], 'repo': repository['name']}
|
||||
state = context.get('repository') or {}
|
||||
if context.get('project_id') == project_id and state.get('owner') and state.get('name'):
|
||||
return {'owner': state['owner'], 'repo': state['name']}
|
||||
repository = context.get('repository') or {}
|
||||
if repository.get('owner') and repository.get('name'):
|
||||
return {'owner': repository['owner'], 'repo': repository['name']}
|
||||
return {'error': 'Could not resolve repository for tool request'}
|
||||
|
||||
|
||||
class LLMServiceClient:
|
||||
"""Call the configured LLM provider with consistent guardrails and tool payloads."""
|
||||
|
||||
def __init__(self, ollama_url: str | None = None, model: str | None = None):
|
||||
self.ollama_url = (ollama_url or settings.ollama_url).rstrip('/')
|
||||
self.model = model or settings.OLLAMA_MODEL
|
||||
self.toolbox = LLMToolbox()
|
||||
self.live_tool_executor = LLMLiveToolExecutor()
|
||||
|
||||
async def chat_with_trace(
|
||||
self,
|
||||
*,
|
||||
stage: str,
|
||||
system_prompt: str,
|
||||
user_prompt: str,
|
||||
tool_context_input: dict | None = None,
|
||||
expect_json: bool = False,
|
||||
) -> tuple[str | None, dict]:
|
||||
"""Invoke the configured LLM and return both content and a structured trace."""
|
||||
effective_system_prompt = self._compose_system_prompt(stage, system_prompt)
|
||||
tool_context = self.toolbox.build_tool_context(stage=stage, context=tool_context_input)
|
||||
live_tool_specs = self.toolbox.build_live_tool_specs(stage=stage, context=tool_context_input)
|
||||
effective_user_prompt = self._compose_user_prompt(user_prompt, tool_context, live_tool_specs)
|
||||
raw_responses: list[dict] = []
|
||||
executed_tool_calls: list[dict] = []
|
||||
current_user_prompt = effective_user_prompt
|
||||
max_rounds = settings.llm_max_tool_call_rounds
|
||||
|
||||
for round_index in range(max_rounds + 1):
|
||||
content, payload, error = await self._send_chat_request(
|
||||
system_prompt=effective_system_prompt,
|
||||
user_prompt=current_user_prompt,
|
||||
expect_json=expect_json,
|
||||
)
|
||||
raw_responses.append(payload)
|
||||
if content:
|
||||
tool_request = self._extract_tool_request(content)
|
||||
if tool_request and round_index < max_rounds:
|
||||
tool_name = tool_request.get('name')
|
||||
tool_arguments = tool_request.get('arguments') or {}
|
||||
tool_result = await self.live_tool_executor.execute(tool_name, tool_arguments, tool_context_input)
|
||||
executed_tool_calls.append(
|
||||
{
|
||||
'name': tool_name,
|
||||
'arguments': tool_arguments,
|
||||
'result': tool_result,
|
||||
}
|
||||
)
|
||||
current_user_prompt = self._compose_follow_up_prompt(user_prompt, tool_context, live_tool_specs, executed_tool_calls)
|
||||
continue
|
||||
return content, {
|
||||
'stage': stage,
|
||||
'provider': 'ollama',
|
||||
'model': self.model,
|
||||
'system_prompt': effective_system_prompt,
|
||||
'user_prompt': current_user_prompt,
|
||||
'assistant_response': content,
|
||||
'raw_response': {
|
||||
'provider_response': raw_responses[-1],
|
||||
'provider_responses': raw_responses,
|
||||
'tool_context': tool_context,
|
||||
'live_tool_specs': live_tool_specs,
|
||||
'executed_tool_calls': executed_tool_calls,
|
||||
},
|
||||
'raw_responses': raw_responses,
|
||||
'fallback_used': False,
|
||||
'guardrails': self._guardrail_sections(stage),
|
||||
'tool_context': tool_context,
|
||||
'live_tool_specs': live_tool_specs,
|
||||
'executed_tool_calls': executed_tool_calls,
|
||||
}
|
||||
if error:
|
||||
break
|
||||
|
||||
return None, {
|
||||
'stage': stage,
|
||||
'provider': 'ollama',
|
||||
'model': self.model,
|
||||
'system_prompt': effective_system_prompt,
|
||||
'user_prompt': current_user_prompt,
|
||||
'assistant_response': '',
|
||||
'raw_response': {
|
||||
'provider_response': raw_responses[-1] if raw_responses else {'error': 'No response'},
|
||||
'provider_responses': raw_responses,
|
||||
'tool_context': tool_context,
|
||||
'live_tool_specs': live_tool_specs,
|
||||
'executed_tool_calls': executed_tool_calls,
|
||||
},
|
||||
'raw_responses': raw_responses,
|
||||
'fallback_used': True,
|
||||
'guardrails': self._guardrail_sections(stage),
|
||||
'tool_context': tool_context,
|
||||
'live_tool_specs': live_tool_specs,
|
||||
'executed_tool_calls': executed_tool_calls,
|
||||
}
|
||||
|
||||
async def _send_chat_request(self, *, system_prompt: str, user_prompt: str, expect_json: bool) -> tuple[str | None, dict, str | None]:
|
||||
"""Send one outbound chat request to the configured model provider."""
|
||||
request_payload = {
|
||||
'model': self.model,
|
||||
'stream': False,
|
||||
'messages': [
|
||||
{'role': 'system', 'content': system_prompt},
|
||||
{'role': 'user', 'content': user_prompt},
|
||||
],
|
||||
}
|
||||
if expect_json:
|
||||
request_payload['format'] = 'json'
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(f'{self.ollama_url}/api/chat', json=request_payload) as resp:
|
||||
payload = await resp.json()
|
||||
if 200 <= resp.status < 300:
|
||||
return (payload.get('message') or {}).get('content', ''), payload, None
|
||||
return None, payload, str(payload.get('error') or payload)
|
||||
except Exception as exc:
|
||||
return None, {'error': str(exc)}, str(exc)
|
||||
|
||||
def _compose_system_prompt(self, stage: str, stage_prompt: str) -> str:
|
||||
"""Merge the stage prompt with configured guardrails."""
|
||||
sections = [stage_prompt.strip()] + self._guardrail_sections(stage)
|
||||
return '\n\n'.join(section for section in sections if section)
|
||||
|
||||
def _guardrail_sections(self, stage: str) -> list[str]:
|
||||
"""Return all configured guardrail sections for one LLM stage."""
|
||||
sections = []
|
||||
if settings.llm_guardrail_prompt:
|
||||
sections.append(f'Global guardrails:\n{settings.llm_guardrail_prompt}')
|
||||
stage_specific = {
|
||||
'request_interpretation': settings.llm_request_interpreter_guardrail_prompt,
|
||||
'change_summary': settings.llm_change_summary_guardrail_prompt,
|
||||
'project_naming': settings.llm_project_naming_guardrail_prompt,
|
||||
'project_id_naming': settings.llm_project_id_guardrail_prompt,
|
||||
}.get(stage)
|
||||
if stage_specific:
|
||||
sections.append(f'Stage-specific guardrails:\n{stage_specific}')
|
||||
return sections
|
||||
|
||||
def _compose_user_prompt(self, prompt: str, tool_context: list[dict], live_tool_specs: list[dict] | None = None) -> str:
|
||||
"""Append tool payloads and live tool-call specs to the outbound user prompt."""
|
||||
live_tool_specs = live_tool_specs if live_tool_specs is not None else []
|
||||
sections = [prompt]
|
||||
if not tool_context:
|
||||
pass
|
||||
else:
|
||||
sections.append(
|
||||
'Service-mediated tool outputs are available below. Treat them as authoritative read-only data supplied by the factory:\n'
|
||||
f'{json.dumps(tool_context, indent=2, sort_keys=True)}'
|
||||
)
|
||||
if live_tool_specs:
|
||||
sections.append(
|
||||
'If you need additional live repository data, you may request exactly one tool call by responding with JSON shaped as '
|
||||
'{"tool_request": {"name": "<tool name>", "arguments": {...}}}. '
|
||||
'After tool results are returned, respond with the final answer instead of another tool request.\n'
|
||||
f'Available live tools:\n{json.dumps(live_tool_specs, indent=2, sort_keys=True)}'
|
||||
)
|
||||
return '\n\n'.join(section for section in sections if section)
|
||||
|
||||
def _compose_follow_up_prompt(self, original_prompt: str, tool_context: list[dict], live_tool_specs: list[dict], executed_tool_calls: list[dict]) -> str:
|
||||
"""Build the follow-up user prompt after executing one or more live tool requests."""
|
||||
sections = [self._compose_user_prompt(original_prompt, tool_context, live_tool_specs)]
|
||||
sections.append(
|
||||
'The service executed the requested live tool call(s). Use the tool result(s) below to produce the final answer. Do not request another tool call.\n'
|
||||
f'{json.dumps(executed_tool_calls, indent=2, sort_keys=True)}'
|
||||
)
|
||||
return '\n\n'.join(sections)
|
||||
|
||||
def _extract_tool_request(self, content: str) -> dict | None:
|
||||
"""Return a normalized tool request when the model explicitly asks for one."""
|
||||
try:
|
||||
parsed = json.loads(content)
|
||||
except Exception:
|
||||
return None
|
||||
if not isinstance(parsed, dict):
|
||||
return None
|
||||
tool_request = parsed.get('tool_request')
|
||||
if not isinstance(tool_request, dict) or not tool_request.get('name'):
|
||||
return None
|
||||
return {
|
||||
'name': str(tool_request.get('name')).strip(),
|
||||
'arguments': tool_request.get('arguments') or {},
|
||||
}
|
||||
|
||||
def get_runtime_configuration(self) -> dict:
|
||||
"""Return the active LLM runtime config, guardrails, and tool exposure."""
|
||||
live_tool_stages = {
|
||||
stage: settings.llm_live_tools_for_stage(stage)
|
||||
for stage in self.toolbox.SUPPORTED_LIVE_TOOL_STAGES
|
||||
}
|
||||
return {
|
||||
'provider': 'ollama',
|
||||
'ollama_url': self.ollama_url,
|
||||
'model': self.model,
|
||||
'guardrails': {
|
||||
'global': settings.llm_guardrail_prompt,
|
||||
'request_interpretation': settings.llm_request_interpreter_guardrail_prompt,
|
||||
'change_summary': settings.llm_change_summary_guardrail_prompt,
|
||||
'project_naming': settings.llm_project_naming_guardrail_prompt,
|
||||
'project_id_naming': settings.llm_project_id_guardrail_prompt,
|
||||
},
|
||||
'system_prompts': {
|
||||
'project_naming': settings.llm_project_naming_system_prompt,
|
||||
'project_id_naming': settings.llm_project_id_system_prompt,
|
||||
},
|
||||
'mediated_tools': settings.llm_tool_allowlist,
|
||||
'live_tools': settings.llm_live_tool_allowlist,
|
||||
'live_tool_stage_allowlist': settings.llm_live_tool_stage_allowlist,
|
||||
'live_tool_stage_tool_map': settings.llm_live_tool_stage_tool_map,
|
||||
'live_tools_by_stage': live_tool_stages,
|
||||
'tool_context_limit': settings.llm_tool_context_limit,
|
||||
'max_tool_call_rounds': settings.llm_max_tool_call_rounds,
|
||||
'gitea_live_tools_configured': bool(settings.gitea_url and settings.gitea_token),
|
||||
}
|
||||
@@ -220,11 +220,31 @@ class N8NSetupAgent:
|
||||
|
||||
async def create_workflow(self, workflow_json: dict) -> dict:
|
||||
"""Create or update a workflow."""
|
||||
return await self._request("POST", "workflows", json=workflow_json)
|
||||
return await self._request("POST", "workflows", json=self._workflow_payload(workflow_json))
|
||||
|
||||
def _workflow_payload(self, workflow_json: dict) -> dict:
|
||||
"""Return a workflow payload without server-managed read-only fields."""
|
||||
payload = dict(workflow_json)
|
||||
payload.pop("active", None)
|
||||
payload.pop("id", None)
|
||||
payload.pop("createdAt", None)
|
||||
payload.pop("updatedAt", None)
|
||||
payload.pop("versionId", None)
|
||||
return payload
|
||||
|
||||
async def _update_workflow_via_put(self, workflow_id: str, workflow_json: dict) -> dict:
|
||||
"""Fallback update path for n8n instances that only support PUT."""
|
||||
return await self._request("PUT", f"workflows/{workflow_id}", json=self._workflow_payload(workflow_json))
|
||||
|
||||
async def update_workflow(self, workflow_id: str, workflow_json: dict) -> dict:
|
||||
"""Update an existing workflow."""
|
||||
return await self._request("PATCH", f"workflows/{workflow_id}", json=workflow_json)
|
||||
result = await self._request("PATCH", f"workflows/{workflow_id}", json=self._workflow_payload(workflow_json))
|
||||
if result.get("status_code") == 405:
|
||||
fallback = await self._update_workflow_via_put(workflow_id, workflow_json)
|
||||
if not fallback.get("error") and isinstance(fallback, dict):
|
||||
fallback.setdefault("method", "PUT")
|
||||
return fallback
|
||||
return result
|
||||
|
||||
async def enable_workflow(self, workflow_id: str) -> dict:
|
||||
"""Enable a workflow."""
|
||||
@@ -232,6 +252,11 @@ class N8NSetupAgent:
|
||||
if result.get("error"):
|
||||
fallback = await self._request("PATCH", f"workflows/{workflow_id}", json={"active": True})
|
||||
if fallback.get("error"):
|
||||
if fallback.get("status_code") == 405:
|
||||
put_fallback = await self._request("PUT", f"workflows/{workflow_id}", json={"active": True})
|
||||
if put_fallback.get("error"):
|
||||
return put_fallback
|
||||
return {"success": True, "id": workflow_id, "method": "put"}
|
||||
return fallback
|
||||
return {"success": True, "id": workflow_id, "method": "patch"}
|
||||
return {"success": True, "id": workflow_id, "method": "activate"}
|
||||
@@ -250,12 +275,12 @@ class N8NSetupAgent:
|
||||
return value
|
||||
return []
|
||||
|
||||
def build_telegram_workflow(self, webhook_path: str, backend_url: str) -> dict:
|
||||
def build_telegram_workflow(self, webhook_path: str, backend_url: str, allowed_chat_id: str | None = None) -> dict:
|
||||
"""Build the Telegram-to-backend workflow definition."""
|
||||
normalized_path = webhook_path.strip().strip("/") or "telegram"
|
||||
allowed_chat = json.dumps(str(allowed_chat_id)) if allowed_chat_id else "''"
|
||||
return {
|
||||
"name": "Telegram to AI Software Factory",
|
||||
"active": False,
|
||||
"settings": {"executionOrder": "v1"},
|
||||
"nodes": [
|
||||
{
|
||||
@@ -273,13 +298,13 @@ class N8NSetupAgent:
|
||||
},
|
||||
{
|
||||
"id": "parse-node",
|
||||
"name": "Prepare Software Request",
|
||||
"name": "Prepare Freeform Request",
|
||||
"type": "n8n-nodes-base.code",
|
||||
"typeVersion": 2,
|
||||
"position": [-200, 120],
|
||||
"parameters": {
|
||||
"language": "javaScript",
|
||||
"jsCode": "const body = $json.body ?? $json;\nconst message = body.message ?? body;\nconst text = String(message.text ?? '').trim();\nconst lines = text.split(/\\r?\\n/);\nconst request = { name: null, description: '', features: [], tech_stack: [] };\nlet nameIndex = -1;\nlet featuresIndex = -1;\nlet techIndex = -1;\nfor (let i = 0; i < lines.length; i += 1) {\n const line = lines[i].trim();\n if (line.toLowerCase().startsWith('name:')) { request.name = line.split(':', 2)[1]?.trim() || null; nameIndex = i; }\n if (line.toLowerCase().startsWith('features:') && featuresIndex === -1) { featuresIndex = i; }\n if (line.toLowerCase().startsWith('tech stack:') && techIndex === -1) { techIndex = i; }\n}\nif (nameIndex >= 0) {\n const descriptionEnd = featuresIndex >= 0 ? featuresIndex : (techIndex >= 0 ? techIndex : lines.length);\n request.description = lines.slice(nameIndex + 1, descriptionEnd).join('\\n').replace(/^description:\\s*/i, '').trim();\n}\nfunction collectList(startIndex, fieldName) {\n if (startIndex < 0) return;\n const firstLine = lines[startIndex].split(':').slice(1).join(':').trim();\n if (firstLine && !firstLine.startsWith('-') && !firstLine.startsWith('*')) {\n request[fieldName].push(...firstLine.split(',').map(item => item.trim()).filter(Boolean));\n }\n for (const rawLine of lines.slice(startIndex + 1)) {\n const line = rawLine.trim();\n if (!line) continue;\n if (/^[A-Za-z ]+:/.test(line)) break;\n if (line.startsWith('-') || line.startsWith('*')) {\n const value = line.slice(1).trim();\n if (value) request[fieldName].push(value);\n }\n }\n}\ncollectList(featuresIndex, 'features');\ncollectList(techIndex, 'tech_stack');\nif (!request.name || request.features.length === 0) { throw new Error('Could not parse software request from Telegram message'); }\nreturn [{ json: { ...request, _source: { raw_text: text, chat_id: message.chat?.id ?? null } } }];",
|
||||
"jsCode": f"const allowedChatId = {allowed_chat};\nconst body = $json.body ?? $json;\nconst message = body.message ?? body;\nconst text = String(message.text ?? '').trim();\nconst chatId = String(message.chat?.id ?? '');\nif (allowedChatId && chatId !== allowedChatId) {{\n return [{{ json: {{ ignored: true, message: `Ignoring message from chat ${{chatId}}`, prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];\n}}\nreturn [{{ json: {{ prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -310,8 +335,8 @@ class N8NSetupAgent:
|
||||
},
|
||||
],
|
||||
"connections": {
|
||||
"Telegram Webhook": {"main": [[{"node": "Prepare Software Request", "type": "main", "index": 0}]]},
|
||||
"Prepare Software Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
|
||||
"Telegram Webhook": {"main": [[{"node": "Prepare Freeform Request", "type": "main", "index": 0}]]},
|
||||
"Prepare Freeform Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
|
||||
"AI Software Factory API": {"main": [[{"node": "Respond to Telegram Webhook", "type": "main", "index": 0}]]},
|
||||
},
|
||||
}
|
||||
@@ -320,11 +345,12 @@ class N8NSetupAgent:
|
||||
self,
|
||||
backend_url: str,
|
||||
credential_name: str,
|
||||
allowed_chat_id: str | None = None,
|
||||
) -> dict:
|
||||
"""Build a production Telegram Trigger based workflow."""
|
||||
allowed_chat = json.dumps(str(allowed_chat_id)) if allowed_chat_id else "''"
|
||||
return {
|
||||
"name": "Telegram to AI Software Factory",
|
||||
"active": False,
|
||||
"settings": {"executionOrder": "v1"},
|
||||
"nodes": [
|
||||
{
|
||||
@@ -333,18 +359,18 @@ class N8NSetupAgent:
|
||||
"type": "n8n-nodes-base.telegramTrigger",
|
||||
"typeVersion": 1,
|
||||
"position": [-520, 120],
|
||||
"parameters": {"updates": ["message"]},
|
||||
"parameters": {"updates": ["message", "channel_post"]},
|
||||
"credentials": {"telegramApi": {"name": credential_name}},
|
||||
},
|
||||
{
|
||||
"id": "parse-node",
|
||||
"name": "Prepare Software Request",
|
||||
"id": "filter-node",
|
||||
"name": "Prepare Freeform Request",
|
||||
"type": "n8n-nodes-base.code",
|
||||
"typeVersion": 2,
|
||||
"position": [-180, 120],
|
||||
"parameters": {
|
||||
"language": "javaScript",
|
||||
"jsCode": "const message = $json.message ?? $json;\nconst text = String(message.text ?? '').trim();\nconst lines = text.split(/\\r?\\n/);\nconst request = { name: null, description: '', features: [], tech_stack: [], _source: { raw_text: text, chat_id: message.chat?.id ?? null } };\nlet nameIndex = -1;\nlet featuresIndex = -1;\nlet techIndex = -1;\nfor (let i = 0; i < lines.length; i += 1) {\n const line = lines[i].trim();\n if (line.toLowerCase().startsWith('name:')) { request.name = line.split(':', 2)[1]?.trim() || null; nameIndex = i; }\n if (line.toLowerCase().startsWith('features:') && featuresIndex === -1) { featuresIndex = i; }\n if (line.toLowerCase().startsWith('tech stack:') && techIndex === -1) { techIndex = i; }\n}\nif (nameIndex >= 0) {\n const descriptionEnd = featuresIndex >= 0 ? featuresIndex : (techIndex >= 0 ? techIndex : lines.length);\n request.description = lines.slice(nameIndex + 1, descriptionEnd).join('\\n').replace(/^description:\\s*/i, '').trim();\n}\nfunction collectList(startIndex, fieldName) {\n if (startIndex < 0) return;\n const firstLine = lines[startIndex].split(':').slice(1).join(':').trim();\n if (firstLine && !firstLine.startsWith('-') && !firstLine.startsWith('*')) {\n request[fieldName].push(...firstLine.split(',').map(item => item.trim()).filter(Boolean));\n }\n for (const rawLine of lines.slice(startIndex + 1)) {\n const line = rawLine.trim();\n if (!line) continue;\n if (/^[A-Za-z ]+:/.test(line)) break;\n if (line.startsWith('-') || line.startsWith('*')) {\n const value = line.slice(1).trim();\n if (value) request[fieldName].push(value);\n }\n }\n}\ncollectList(featuresIndex, 'features');\ncollectList(techIndex, 'tech_stack');\nif (!request.name || request.features.length === 0) { throw new Error('Could not parse software request from Telegram message'); }\nreturn [{ json: request }];",
|
||||
"jsCode": f"const allowedChatId = {allowed_chat};\nconst message = $json.message ?? $json.channel_post ?? $json;\nconst text = String(message.text ?? '').trim();\nconst chatId = String(message.chat?.id ?? '');\nif (!text) return [];\nif (allowedChatId && chatId !== allowedChatId) return [];\nreturn [{{ json: {{ prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -371,15 +397,15 @@ class N8NSetupAgent:
|
||||
"parameters": {
|
||||
"resource": "message",
|
||||
"operation": "sendMessage",
|
||||
"chatId": "={{ $('Telegram Trigger').item.json.message.chat.id }}",
|
||||
"text": "={{ $json.data ? `Generated ${$json.data.name} (${($json.data.changed_files || []).length} files)` : ($json.message || 'Software generation request accepted') }}",
|
||||
"chatId": "={{ ($('Telegram Trigger').item.json.message ?? $('Telegram Trigger').item.json.channel_post).chat.id }}",
|
||||
"text": "={{ $json.summary_message || $json.data?.summary_message || $json.message || 'Software generation request accepted' }}",
|
||||
},
|
||||
"credentials": {"telegramApi": {"name": credential_name}},
|
||||
},
|
||||
],
|
||||
"connections": {
|
||||
"Telegram Trigger": {"main": [[{"node": "Prepare Software Request", "type": "main", "index": 0}]]},
|
||||
"Prepare Software Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
|
||||
"Telegram Trigger": {"main": [[{"node": "Prepare Freeform Request", "type": "main", "index": 0}]]},
|
||||
"Prepare Freeform Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
|
||||
"AI Software Factory API": {"main": [[{"node": "Send Telegram Update", "type": "main", "index": 0}]]},
|
||||
},
|
||||
}
|
||||
@@ -433,7 +459,7 @@ class N8NSetupAgent:
|
||||
"""
|
||||
return await self.setup(
|
||||
webhook_path=webhook_path,
|
||||
backend_url=f"{settings.backend_public_url}/generate",
|
||||
backend_url=f"{settings.backend_public_url}/generate/text",
|
||||
force_update=False,
|
||||
)
|
||||
|
||||
@@ -470,7 +496,7 @@ class N8NSetupAgent:
|
||||
"suggestion": health.get("suggestion"),
|
||||
}
|
||||
|
||||
effective_backend_url = backend_url or f"{settings.backend_public_url}/generate"
|
||||
effective_backend_url = backend_url or f"{settings.backend_public_url}/generate/text"
|
||||
effective_bot_token = telegram_bot_token or settings.telegram_bot_token
|
||||
effective_credential_name = telegram_credential_name or settings.n8n_telegram_credential_name
|
||||
trigger_mode = use_telegram_trigger if use_telegram_trigger is not None else bool(effective_bot_token)
|
||||
@@ -482,11 +508,13 @@ class N8NSetupAgent:
|
||||
workflow = self.build_telegram_trigger_workflow(
|
||||
backend_url=effective_backend_url,
|
||||
credential_name=effective_credential_name,
|
||||
allowed_chat_id=settings.telegram_chat_id,
|
||||
)
|
||||
else:
|
||||
workflow = self.build_telegram_workflow(
|
||||
webhook_path=webhook_path,
|
||||
backend_url=effective_backend_url,
|
||||
allowed_chat_id=settings.telegram_chat_id,
|
||||
)
|
||||
|
||||
existing = await self.get_workflow(workflow["name"])
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
127
ai_software_factory/agents/prompt_workflow.py
Normal file
127
ai_software_factory/agents/prompt_workflow.py
Normal file
@@ -0,0 +1,127 @@
|
||||
"""Helpers for prompt-level repository workflows such as undoing a prompt."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
|
||||
try:
|
||||
from ..config import settings
|
||||
from .database_manager import DatabaseManager
|
||||
from .git_manager import GitManager
|
||||
from .gitea import GiteaAPI
|
||||
except ImportError:
|
||||
from config import settings
|
||||
from agents.database_manager import DatabaseManager
|
||||
from agents.git_manager import GitManager
|
||||
from agents.gitea import GiteaAPI
|
||||
|
||||
|
||||
class PromptWorkflowManager:
    """Coordinate prompt-level repository actions against git and Gitea."""

    def __init__(self, db):
        # All persistence goes through DatabaseManager; the raw handle is not
        # used directly by this class.
        self.db_manager = DatabaseManager(db)
        self.gitea_api = GiteaAPI(
            token=settings.GITEA_TOKEN,
            base_url=settings.GITEA_URL,
            owner=settings.GITEA_OWNER,
            repo=settings.GITEA_REPO or '',
        )

    async def undo_prompt(self, project_id: str, prompt_id: int) -> dict:
        """Revert the commit associated with a prompt and push the revert to the PR branch.

        Flow: validate the project and prompt correlation, locate the first
        non-reverted commit recorded for the prompt, run a local
        ``git revert`` on the commit's branch, best-effort push the revert to
        Gitea, then record the revert in the commit/revert/system-event logs.

        Returns:
            A dict with ``status`` of ``'error'``, ``'ignored'`` (already
            reverted), or ``'success'``; on success it also carries the
            reverted and revert commit hashes plus optional commit/compare URLs.
        """
        history = self.db_manager.get_project_by_id(project_id)
        if history is None:
            return {'status': 'error', 'message': 'Project not found'}

        # Find the correlation row linking this prompt to its commits.
        correlations = self.db_manager.get_prompt_change_correlations(project_id=project_id, limit=500)
        correlation = next((item for item in correlations if item.get('prompt_id') == prompt_id), None)
        if correlation is None:
            return {'status': 'error', 'message': 'Prompt not found for project'}
        if correlation.get('revert'):
            # Idempotence guard: a second undo of the same prompt is a no-op.
            return {'status': 'ignored', 'message': 'Prompt has already been reverted', 'revert': correlation['revert']}

        # First commit that still stands (has a hash and was not reverted).
        original_commit = next(
            (commit for commit in correlation.get('commits', []) if commit.get('remote_status') != 'reverted' and commit.get('commit_hash')),
            None,
        )
        if original_commit is None:
            return {'status': 'error', 'message': 'No reversible commit was recorded for this prompt'}

        # Default branch name mirrors the generation convention 'ai/<project_id>'.
        branch = original_commit.get('branch') or f'ai/{project_id}'
        project_root = settings.projects_root / project_id
        git_manager = GitManager(project_id, project_dir=str(project_root))
        if not git_manager.has_repo():
            return {'status': 'error', 'message': 'Local project repository is not available for undo'}

        try:
            git_manager.checkout_branch(branch)
            # Head before the revert, used later for the compare URL.
            previous_head = git_manager.current_head_or_none()
            # NOTE(review): assumes revert_commit returns the new revert
            # commit's hash — consistent with how it is used below; confirm
            # against GitManager.
            revert_commit_hash = git_manager.revert_commit(original_commit['commit_hash'])
        except (subprocess.CalledProcessError, FileNotFoundError) as exc:
            return {'status': 'error', 'message': f'Unable to revert prompt commit: {exc}'}

        repository = self.db_manager.get_project_audit_data(project_id).get('repository') or {}
        commit_url = None
        compare_url = None
        # Push the revert only when the project has its own usable Gitea
        # repository and a token is configured.
        if (
            repository.get('mode') == 'project'
            and repository.get('status') in {'created', 'exists', 'ready'}
            and settings.gitea_token
            and repository.get('owner')
            and repository.get('name')
        ):
            try:
                user_info = await self.gitea_api.get_current_user()
                username = user_info.get('login') if isinstance(user_info, dict) else None
                if username and not user_info.get('error'):
                    remote_url = repository.get('clone_url') or self.gitea_api.build_repo_git_url(repository.get('owner'), repository.get('name'))
                    if remote_url:
                        git_manager.push_with_credentials(
                            remote_url=remote_url,
                            username=username,
                            password=settings.gitea_token,
                            branch=branch,
                        )
                        # URLs are only meaningful once the revert is pushed.
                        commit_url = self.gitea_api.build_commit_url(revert_commit_hash, repository.get('owner'), repository.get('name'))
                        if previous_head:
                            compare_url = self.gitea_api.build_compare_url(previous_head, revert_commit_hash, repository.get('owner'), repository.get('name'))
            except (RuntimeError, subprocess.CalledProcessError, FileNotFoundError):
                # Best-effort push: the local revert already succeeded, so a
                # remote failure is swallowed and URLs stay None.
                pass

        # Record the revert as its own commit row, marked 'reverted'.
        self.db_manager.log_commit(
            project_id=project_id,
            commit_message=f'Revert prompt {prompt_id}',
            actor='dashboard',
            actor_type='user',
            history_id=history.id,
            prompt_id=prompt_id,
            commit_hash=revert_commit_hash,
            changed_files=original_commit.get('changed_files', []),
            branch=branch,
            commit_url=commit_url,
            compare_url=compare_url,
            remote_status='reverted',
        )
        # Correlate the revert with the prompt so a second undo is ignored.
        self.db_manager.log_prompt_revert(
            project_id=project_id,
            prompt_id=prompt_id,
            reverted_commit_hash=original_commit['commit_hash'],
            revert_commit_hash=revert_commit_hash,
            actor='dashboard',
            commit_url=commit_url,
        )
        self.db_manager.log_system_event(
            component='git',
            level='INFO',
            message=f'Reverted prompt {prompt_id} for project {project_id}',
        )
        return {
            'status': 'success',
            'project_id': project_id,
            'prompt_id': prompt_id,
            'reverted_commit_hash': original_commit['commit_hash'],
            'revert_commit_hash': revert_commit_hash,
            'commit_url': commit_url,
            'compare_url': compare_url,
        }
|
||||
502
ai_software_factory/agents/request_interpreter.py
Normal file
502
ai_software_factory/agents/request_interpreter.py
Normal file
@@ -0,0 +1,502 @@
|
||||
"""Interpret free-form software requests into structured generation input."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
try:
|
||||
from ..config import settings
|
||||
from .gitea import GiteaAPI
|
||||
from .llm_service import LLMServiceClient
|
||||
except ImportError:
|
||||
from config import settings
|
||||
from agents.gitea import GiteaAPI
|
||||
from agents.llm_service import LLMServiceClient
|
||||
|
||||
|
||||
class RequestInterpreter:
|
||||
"""Use Ollama to turn free-form text into a structured software request."""
|
||||
|
||||
REQUEST_PREFIX_WORDS = {
|
||||
'a', 'an', 'app', 'application', 'build', 'create', 'dashboard', 'develop', 'design', 'for', 'generate',
|
||||
'internal', 'make', 'me', 'modern', 'need', 'new', 'our', 'platform', 'please', 'project', 'service',
|
||||
'simple', 'site', 'start', 'system', 'the', 'tool', 'us', 'want', 'web', 'website', 'with',
|
||||
}
|
||||
|
||||
REPO_NOISE_WORDS = REQUEST_PREFIX_WORDS | {'and', 'from', 'into', 'on', 'that', 'this', 'to'}
|
||||
GENERIC_PROJECT_NAME_WORDS = {
|
||||
'app', 'application', 'harness', 'platform', 'project', 'purpose', 'service', 'solution', 'suite', 'system', 'test', 'tool',
|
||||
}
|
||||
PLACEHOLDER_PROJECT_NAME_WORDS = {
|
||||
'generated project', 'new project', 'project', 'temporary name', 'temp name', 'placeholder', 'untitled project',
|
||||
}
|
||||
|
||||
def __init__(self, ollama_url: str | None = None, model: str | None = None):
    """Wire up the LLM client and, when configured, a Gitea API handle."""
    base_url = ollama_url or settings.ollama_url
    self.ollama_url = base_url.rstrip('/')
    self.model = model or settings.OLLAMA_MODEL
    self.llm_client = LLMServiceClient(ollama_url=self.ollama_url, model=self.model)
    # Gitea access is optional: build the client only when both the URL and
    # token are configured; callers must tolerate gitea_api being None.
    if not (settings.gitea_url and settings.gitea_token):
        self.gitea_api = None
    else:
        self.gitea_api = GiteaAPI(
            token=settings.GITEA_TOKEN,
            base_url=settings.GITEA_URL,
            owner=settings.GITEA_OWNER,
            repo=settings.GITEA_REPO or '',
        )
|
||||
|
||||
async def interpret(self, prompt_text: str, context: dict | None = None) -> dict:
|
||||
"""Interpret free-form text into the request shape expected by the orchestrator."""
|
||||
interpreted, _trace = await self.interpret_with_trace(prompt_text, context=context)
|
||||
return interpreted
|
||||
|
||||
async def interpret_with_trace(self, prompt_text: str, context: dict | None = None) -> tuple[dict, dict]:
|
||||
"""Interpret free-form text into the request shape expected by the orchestrator."""
|
||||
normalized = prompt_text.strip()
|
||||
if not normalized:
|
||||
raise ValueError('Prompt text cannot be empty')
|
||||
|
||||
compact_context = self._build_compact_context(context or {})
|
||||
|
||||
system_prompt = (
|
||||
'You route Telegram software prompts. '
|
||||
'Decide whether the prompt starts a new project or continues an existing tracked project. '
|
||||
'When continuing, identify the best matching project_id from the provided context and the issue number if one is mentioned or implied by recent chat history. '
|
||||
'Return only JSON with keys request and routing. '
|
||||
'request must contain name, description, features, tech_stack. '
|
||||
'routing must contain intent, project_id, project_name, issue_number, confidence, and reasoning_summary. '
|
||||
'Use the provided project catalog and recent chat history. '
|
||||
'If the user says things like also, continue, work on this, that issue, or follow-up wording, prefer continuation of the most relevant recent project. '
|
||||
'If the user explicitly asks for a new project, set intent to new_project.'
|
||||
)
|
||||
user_prompt = normalized
|
||||
if compact_context:
|
||||
user_prompt = (
|
||||
f"Conversation context:\n{json.dumps(compact_context, indent=2)}\n\n"
|
||||
f"User prompt:\n{normalized}"
|
||||
)
|
||||
|
||||
content, trace = await self.llm_client.chat_with_trace(
|
||||
stage='request_interpretation',
|
||||
system_prompt=system_prompt,
|
||||
user_prompt=user_prompt,
|
||||
tool_context_input={
|
||||
'projects': compact_context.get('projects', []),
|
||||
'open_issues': [
|
||||
issue
|
||||
for project in compact_context.get('projects', [])
|
||||
for issue in project.get('open_issues', [])
|
||||
],
|
||||
'recent_chat_history': compact_context.get('recent_chat_history', []),
|
||||
},
|
||||
expect_json=True,
|
||||
)
|
||||
if content:
|
||||
try:
|
||||
parsed = json.loads(content)
|
||||
interpreted = self._normalize_interpreted_request(parsed, normalized)
|
||||
routing = self._normalize_routing(parsed.get('routing'), interpreted, compact_context)
|
||||
if routing.get('intent') == 'continue_project' and routing.get('project_name'):
|
||||
interpreted['name'] = routing['project_name']
|
||||
naming_trace = None
|
||||
if routing.get('intent') == 'new_project':
|
||||
interpreted, routing, naming_trace = await self._refine_new_project_identity(
|
||||
prompt_text=normalized,
|
||||
interpreted=interpreted,
|
||||
routing=routing,
|
||||
context=compact_context,
|
||||
)
|
||||
trace['routing'] = routing
|
||||
trace['context_excerpt'] = compact_context
|
||||
if naming_trace is not None:
|
||||
trace['project_naming'] = naming_trace
|
||||
return interpreted, trace
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
interpreted, routing = self._heuristic_fallback(normalized, compact_context)
|
||||
if routing.get('intent') == 'new_project':
|
||||
constraints = await self._collect_project_identity_constraints(compact_context)
|
||||
routing['repo_name'] = self._ensure_unique_repo_name(routing.get('repo_name') or interpreted.get('name') or 'project', constraints['repo_names'])
|
||||
return interpreted, {
|
||||
'stage': 'request_interpretation',
|
||||
'provider': 'heuristic',
|
||||
'model': self.model,
|
||||
'system_prompt': system_prompt,
|
||||
'user_prompt': user_prompt,
|
||||
'assistant_response': json.dumps({'request': interpreted, 'routing': routing}),
|
||||
'raw_response': {'fallback': 'heuristic', 'llm_trace': trace.get('raw_response') if isinstance(trace, dict) else None},
|
||||
'routing': routing,
|
||||
'context_excerpt': compact_context,
|
||||
'guardrails': trace.get('guardrails') if isinstance(trace, dict) else [],
|
||||
'tool_context': trace.get('tool_context') if isinstance(trace, dict) else [],
|
||||
'fallback_used': True,
|
||||
}
|
||||
|
||||
async def _refine_new_project_identity(
|
||||
self,
|
||||
*,
|
||||
prompt_text: str,
|
||||
interpreted: dict,
|
||||
routing: dict,
|
||||
context: dict,
|
||||
) -> tuple[dict, dict, dict | None]:
|
||||
"""Refine project and repository naming for genuinely new work."""
|
||||
constraints = await self._collect_project_identity_constraints(context)
|
||||
user_prompt = (
|
||||
f"Original user prompt:\n{prompt_text}\n\n"
|
||||
f"Draft structured request:\n{json.dumps(interpreted, indent=2)}\n\n"
|
||||
f"Tracked project names to avoid reusing unless the user clearly wants them:\n{json.dumps(sorted(constraints['project_names']))}\n\n"
|
||||
f"Repository slugs already reserved in tracked projects or Gitea:\n{json.dumps(sorted(constraints['repo_names']))}\n\n"
|
||||
"Suggest the best project display name and repository slug for this new project."
|
||||
)
|
||||
content, trace = await self.llm_client.chat_with_trace(
|
||||
stage='project_naming',
|
||||
system_prompt=settings.llm_project_naming_system_prompt,
|
||||
user_prompt=user_prompt,
|
||||
tool_context_input={
|
||||
'projects': context.get('projects', []),
|
||||
},
|
||||
expect_json=True,
|
||||
)
|
||||
if content:
|
||||
try:
|
||||
fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
|
||||
parsed = json.loads(content)
|
||||
project_name, repo_name = self._normalize_project_identity(
|
||||
parsed,
|
||||
fallback_name=fallback_name,
|
||||
)
|
||||
repo_name = self._ensure_unique_repo_name(repo_name, constraints['repo_names'])
|
||||
interpreted['name'] = project_name
|
||||
routing['project_name'] = project_name
|
||||
routing['repo_name'] = repo_name
|
||||
return interpreted, routing, trace
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
fallback_name = self._preferred_project_name_fallback(prompt_text, interpreted.get('name'))
|
||||
routing['project_name'] = fallback_name
|
||||
routing['repo_name'] = self._ensure_unique_repo_name(self._derive_repo_name(fallback_name), constraints['repo_names'])
|
||||
return interpreted, routing, trace
|
||||
|
||||
async def _collect_project_identity_constraints(self, context: dict) -> dict[str, set[str]]:
|
||||
"""Collect reserved project names and repository slugs from tracked state and Gitea."""
|
||||
project_names: set[str] = set()
|
||||
repo_names: set[str] = set()
|
||||
for project in context.get('projects', []):
|
||||
if project.get('name'):
|
||||
project_names.add(str(project.get('name')).strip())
|
||||
repository = project.get('repository') or {}
|
||||
if repository.get('name'):
|
||||
repo_names.add(str(repository.get('name')).strip())
|
||||
repo_names.update(await self._load_remote_repo_names())
|
||||
return {
|
||||
'project_names': project_names,
|
||||
'repo_names': repo_names,
|
||||
}
|
||||
|
||||
async def _load_remote_repo_names(self) -> set[str]:
|
||||
"""Load current Gitea repository names when live credentials are available."""
|
||||
if settings.gitea_repo:
|
||||
return {settings.gitea_repo}
|
||||
if self.gitea_api is None or not settings.gitea_owner:
|
||||
return set()
|
||||
repos = await self.gitea_api.list_repositories(owner=settings.gitea_owner)
|
||||
if not isinstance(repos, list):
|
||||
return set()
|
||||
return {str(repo.get('name')).strip() for repo in repos if repo.get('name')}
|
||||
|
||||
def _normalize_interpreted_request(self, interpreted: dict, original_prompt: str) -> dict:
|
||||
"""Normalize LLM output into the required request shape."""
|
||||
request_payload = interpreted.get('request') if isinstance(interpreted.get('request'), dict) else interpreted
|
||||
name = str(interpreted.get('name') or '').strip() or self._derive_name(original_prompt)
|
||||
if isinstance(request_payload, dict):
|
||||
name = str(request_payload.get('name') or '').strip() or self._derive_name(original_prompt)
|
||||
description = str((request_payload or {}).get('description') or '').strip() or original_prompt[:255]
|
||||
features = self._normalize_list((request_payload or {}).get('features'))
|
||||
tech_stack = self._normalize_list((request_payload or {}).get('tech_stack'))
|
||||
if not features:
|
||||
features = ['core workflow based on free-form request']
|
||||
return {
|
||||
'name': name[:255],
|
||||
'description': description[:255],
|
||||
'features': features,
|
||||
'tech_stack': tech_stack,
|
||||
}
|
||||
|
||||
def _build_compact_context(self, context: dict) -> dict:
|
||||
"""Reduce interpreter context to the fields that help routing."""
|
||||
projects = []
|
||||
for project in context.get('projects', [])[:10]:
|
||||
issues = []
|
||||
for issue in project.get('open_issues', [])[:5]:
|
||||
issues.append({'number': issue.get('number'), 'title': issue.get('title'), 'state': issue.get('state')})
|
||||
projects.append(
|
||||
{
|
||||
'project_id': project.get('project_id'),
|
||||
'name': project.get('name'),
|
||||
'description': project.get('description'),
|
||||
'repository': project.get('repository'),
|
||||
'open_pull_request': bool(project.get('open_pull_request')),
|
||||
'open_issues': issues,
|
||||
}
|
||||
)
|
||||
return {
|
||||
'chat_id': context.get('chat_id'),
|
||||
'recent_chat_history': context.get('recent_chat_history', [])[:8],
|
||||
'projects': projects,
|
||||
}
|
||||
|
||||
def _normalize_routing(self, routing: dict | None, interpreted: dict, context: dict) -> dict:
|
||||
"""Normalize routing metadata returned by the LLM."""
|
||||
routing = routing or {}
|
||||
project_id = routing.get('project_id')
|
||||
project_name = routing.get('project_name')
|
||||
issue_number = routing.get('issue_number')
|
||||
if issue_number in ('', None):
|
||||
issue_number = None
|
||||
elif isinstance(issue_number, str) and issue_number.isdigit():
|
||||
issue_number = int(issue_number)
|
||||
matched_project = None
|
||||
for project in context.get('projects', []):
|
||||
if project_id and project.get('project_id') == project_id:
|
||||
matched_project = project
|
||||
break
|
||||
if project_name and project.get('name') == project_name:
|
||||
matched_project = project
|
||||
break
|
||||
intent = str(routing.get('intent') or '').strip() or ('continue_project' if matched_project else 'new_project')
|
||||
if matched_project is None and intent == 'continue_project':
|
||||
recent_chat_history = context.get('recent_chat_history', [])
|
||||
recent_project_id = recent_chat_history[0].get('project_id') if recent_chat_history else None
|
||||
if recent_project_id:
|
||||
matched_project = next(
|
||||
(project for project in context.get('projects', []) if project.get('project_id') == recent_project_id),
|
||||
None,
|
||||
)
|
||||
normalized = {
|
||||
'intent': intent,
|
||||
'project_id': matched_project.get('project_id') if matched_project else project_id,
|
||||
'project_name': matched_project.get('name') if matched_project else (project_name or interpreted.get('name')),
|
||||
'repo_name': routing.get('repo_name') if intent == 'new_project' else None,
|
||||
'issue_number': issue_number,
|
||||
'confidence': routing.get('confidence') or ('medium' if matched_project else 'low'),
|
||||
'reasoning_summary': routing.get('reasoning_summary') or ('Matched prior project context' if matched_project else 'No strong prior project match found'),
|
||||
}
|
||||
if normalized['intent'] == 'new_project' and not normalized['repo_name']:
|
||||
normalized['repo_name'] = self._derive_repo_name(normalized['project_name'] or interpreted.get('name') or 'Generated Project')
|
||||
return normalized
|
||||
|
||||
def _normalize_list(self, value) -> list[str]:
|
||||
if isinstance(value, list):
|
||||
return [str(item).strip() for item in value if str(item).strip()]
|
||||
if isinstance(value, str) and value.strip():
|
||||
return [item.strip() for item in value.split(',') if item.strip()]
|
||||
return []
|
||||
|
||||
def _derive_name(self, prompt_text: str) -> str:
|
||||
"""Derive a stable project name when the LLM does not provide one."""
|
||||
first_line = prompt_text.splitlines()[0].strip()
|
||||
quoted = re.search(r'["\']([^"\']{3,80})["\']', first_line)
|
||||
if quoted:
|
||||
return self._humanize_name(quoted.group(1))
|
||||
|
||||
noun_phrase = re.search(
|
||||
r'(?:build|create|start|make|develop|generate|design|need|want)\s+'
|
||||
r'(?:me\s+|us\s+|an?\s+|the\s+|new\s+|internal\s+|simple\s+|lightweight\s+|modern\s+|web\s+|mobile\s+)*'
|
||||
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
|
||||
first_line,
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
if noun_phrase:
|
||||
return self._humanize_name(noun_phrase.group(1))
|
||||
|
||||
focused_phrase = re.search(
|
||||
r'(?:purpose\s+is\s+to\s+create\s+(?:an?\s+)?)'
|
||||
r'([a-z0-9][a-z0-9\s-]{2,80}?(?:portal|dashboard|app|application|service|tool|system|platform|api|bot|assistant|website|site|workspace|tracker|manager|harness|runner|framework|suite|pipeline|lab))\b',
|
||||
first_line,
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
if focused_phrase:
|
||||
return self._humanize_name(focused_phrase.group(1))
|
||||
|
||||
cleaned = re.sub(r'[^A-Za-z0-9 ]+', ' ', first_line)
|
||||
stopwords = {
|
||||
'build', 'create', 'start', 'make', 'develop', 'generate', 'design', 'need', 'want', 'please', 'for', 'our', 'with', 'that', 'this',
|
||||
'new', 'internal', 'simple', 'modern', 'web', 'mobile', 'app', 'application', 'tool', 'system',
|
||||
}
|
||||
tokens = [word for word in cleaned.split() if word and word.lower() not in stopwords]
|
||||
if tokens:
|
||||
return self._humanize_name(' '.join(tokens[:4]))
|
||||
return 'Generated Project'
|
||||
|
||||
def _humanize_name(self, raw_name: str) -> str:
|
||||
"""Normalize a candidate project name into a readable title."""
|
||||
cleaned = re.sub(r'[^A-Za-z0-9\s-]+', ' ', raw_name).strip(' -')
|
||||
cleaned = re.sub(r'\s+', ' ', cleaned)
|
||||
cleaned = self._trim_request_prefix(cleaned)
|
||||
special_upper = {'api', 'crm', 'erp', 'cms', 'hr', 'it', 'ui', 'qa'}
|
||||
words = []
|
||||
for word in cleaned.split()[:6]:
|
||||
lowered = word.lower()
|
||||
words.append(lowered.upper() if lowered in special_upper else lowered.capitalize())
|
||||
return ' '.join(words) or 'Generated Project'
|
||||
|
||||
def _trim_request_prefix(self, candidate: str) -> str:
|
||||
"""Remove leading request phrasing from model-produced names and slugs."""
|
||||
tokens = [token for token in re.split(r'[-\s]+', candidate or '') if token]
|
||||
while tokens and tokens[0].lower() in self.REQUEST_PREFIX_WORDS:
|
||||
tokens.pop(0)
|
||||
trimmed = ' '.join(tokens).strip()
|
||||
return trimmed or candidate.strip()
|
||||
|
||||
def _derive_repo_name(self, project_name: str) -> str:
|
||||
"""Derive a repository slug from a human-readable project name."""
|
||||
preferred_name = self._trim_request_prefix((project_name or 'project').strip())
|
||||
preferred = preferred_name.lower().replace(' ', '-')
|
||||
sanitized = ''.join(ch if ch.isalnum() or ch in {'-', '_'} else '-' for ch in preferred)
|
||||
while '--' in sanitized:
|
||||
sanitized = sanitized.replace('--', '-')
|
||||
return sanitized.strip('-') or 'project'
|
||||
|
||||
def _should_use_repo_name_candidate(self, candidate: str, project_name: str) -> bool:
|
||||
"""Return whether a model-proposed repo slug is concise enough to trust directly."""
|
||||
cleaned = self._trim_request_prefix(re.sub(r'[^A-Za-z0-9\s_-]+', ' ', candidate or '').strip())
|
||||
if not cleaned:
|
||||
return False
|
||||
candidate_tokens = [token.lower() for token in re.split(r'[-\s_]+', cleaned) if token]
|
||||
if not candidate_tokens:
|
||||
return False
|
||||
if len(candidate_tokens) > 6:
|
||||
return False
|
||||
noise_count = sum(1 for token in candidate_tokens if token in self.REPO_NOISE_WORDS)
|
||||
if noise_count >= 2:
|
||||
return False
|
||||
if len('-'.join(candidate_tokens)) > 40:
|
||||
return False
|
||||
project_tokens = {
|
||||
token.lower()
|
||||
for token in re.split(r'[-\s_]+', project_name or '')
|
||||
if token and token.lower() not in self.REPO_NOISE_WORDS
|
||||
}
|
||||
if project_tokens:
|
||||
overlap = sum(1 for token in candidate_tokens if token in project_tokens)
|
||||
if overlap == 0:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _should_use_project_name_candidate(self, candidate: str, fallback_name: str) -> bool:
|
||||
"""Return whether a model-proposed project title is concrete enough to trust."""
|
||||
cleaned = self._trim_request_prefix(re.sub(r'[^A-Za-z0-9\s-]+', ' ', candidate or '').strip())
|
||||
if not cleaned:
|
||||
return False
|
||||
candidate_tokens = [token.lower() for token in re.split(r'[-\s]+', cleaned) if token]
|
||||
if not candidate_tokens:
|
||||
return False
|
||||
if len(candidate_tokens) == 1 and candidate_tokens[0] in self.GENERIC_PROJECT_NAME_WORDS:
|
||||
return False
|
||||
if all(token in self.GENERIC_PROJECT_NAME_WORDS for token in candidate_tokens):
|
||||
return False
|
||||
fallback_tokens = {
|
||||
token.lower() for token in re.split(r'[-\s]+', fallback_name or '') if token and token.lower() not in self.REPO_NOISE_WORDS
|
||||
}
|
||||
if fallback_tokens and len(candidate_tokens) <= 2:
|
||||
overlap = sum(1 for token in candidate_tokens if token in fallback_tokens)
|
||||
if overlap == 0 and any(token in self.GENERIC_PROJECT_NAME_WORDS for token in candidate_tokens):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _preferred_project_name_fallback(self, prompt_text: str, interpreted_name: str | None) -> str:
|
||||
"""Pick the best fallback title when the earlier interpretation produced a placeholder."""
|
||||
interpreted_clean = self._humanize_name(str(interpreted_name or '').strip()) if interpreted_name else ''
|
||||
normalized_interpreted = interpreted_clean.lower()
|
||||
if normalized_interpreted and normalized_interpreted not in self.PLACEHOLDER_PROJECT_NAME_WORDS:
|
||||
if not (len(normalized_interpreted.split()) == 1 and normalized_interpreted in self.GENERIC_PROJECT_NAME_WORDS):
|
||||
return interpreted_clean
|
||||
return self._derive_name(prompt_text)
|
||||
|
||||
def _ensure_unique_repo_name(self, repo_name: str, reserved_names: set[str]) -> str:
|
||||
"""Choose a repository slug that does not collide with tracked or remote repositories."""
|
||||
base_name = self._derive_repo_name(repo_name)
|
||||
if base_name not in reserved_names:
|
||||
return base_name
|
||||
suffix = 2
|
||||
while f'{base_name}-{suffix}' in reserved_names:
|
||||
suffix += 1
|
||||
return f'{base_name}-{suffix}'
|
||||
|
||||
def _normalize_project_identity(self, payload: dict, fallback_name: str) -> tuple[str, str]:
|
||||
"""Normalize model-proposed project and repository naming."""
|
||||
fallback_project_name = self._humanize_name(str(fallback_name or 'Generated Project'))
|
||||
project_candidate = str(payload.get('project_name') or payload.get('name') or '').strip()
|
||||
project_name = fallback_project_name
|
||||
if project_candidate and self._should_use_project_name_candidate(project_candidate, fallback_project_name):
|
||||
project_name = self._humanize_name(project_candidate)
|
||||
repo_candidate = str(payload.get('repo_name') or '').strip()
|
||||
repo_name = self._derive_repo_name(project_name)
|
||||
if repo_candidate and self._should_use_repo_name_candidate(repo_candidate, project_name):
|
||||
repo_name = self._derive_repo_name(repo_candidate)
|
||||
return project_name, repo_name
|
||||
|
||||
def _heuristic_fallback(self, prompt_text: str, context: dict | None = None) -> tuple[dict, dict]:
|
||||
"""Fallback request extraction when Ollama is unavailable."""
|
||||
lowered = prompt_text.lower()
|
||||
tech_candidates = [
|
||||
'python', 'fastapi', 'django', 'flask', 'postgresql', 'sqlite', 'react', 'vue', 'nicegui', 'docker'
|
||||
]
|
||||
tech_stack = [candidate for candidate in tech_candidates if candidate in lowered]
|
||||
sentences = [part.strip() for part in re.split(r'[\n\.]+', prompt_text) if part.strip()]
|
||||
features = sentences[:3] or ['Implement the user request from free-form text']
|
||||
interpreted = {
|
||||
'name': self._derive_name(prompt_text),
|
||||
'description': sentences[0][:255] if sentences else prompt_text[:255],
|
||||
'features': features,
|
||||
'tech_stack': tech_stack,
|
||||
}
|
||||
routing = self._heuristic_routing(prompt_text, context or {})
|
||||
if routing.get('project_name'):
|
||||
interpreted['name'] = routing['project_name']
|
||||
return interpreted, routing
|
||||
|
||||
def _heuristic_routing(self, prompt_text: str, context: dict) -> dict:
|
||||
"""Best-effort routing when the LLM is unavailable."""
|
||||
lowered = prompt_text.lower()
|
||||
explicit_new = any(token in lowered for token in ['new project', 'start a new project', 'create a new project', 'build a new app'])
|
||||
referenced_issue = self._extract_issue_number(prompt_text)
|
||||
recent_history = context.get('recent_chat_history', [])
|
||||
projects = context.get('projects', [])
|
||||
last_project_id = recent_history[0].get('project_id') if recent_history else None
|
||||
last_issue = ((recent_history[0].get('related_issue') or {}).get('number') if recent_history else None)
|
||||
|
||||
matched_project = None
|
||||
for project in projects:
|
||||
name = (project.get('name') or '').lower()
|
||||
repo = ((project.get('repository') or {}).get('name') or '').lower()
|
||||
if name and name in lowered:
|
||||
matched_project = project
|
||||
break
|
||||
if repo and repo in lowered:
|
||||
matched_project = project
|
||||
break
|
||||
if matched_project is None and not explicit_new:
|
||||
follow_up_tokens = ['also', 'continue', 'for this project', 'for that project', 'work on this', 'work on that', 'fix that', 'add this']
|
||||
if any(token in lowered for token in follow_up_tokens) and last_project_id:
|
||||
matched_project = next((project for project in projects if project.get('project_id') == last_project_id), None)
|
||||
issue_number = referenced_issue
|
||||
if issue_number is None and any(token in lowered for token in ['that issue', 'this issue', 'the issue']) and last_issue is not None:
|
||||
issue_number = last_issue
|
||||
intent = 'new_project' if explicit_new or matched_project is None else 'continue_project'
|
||||
return {
|
||||
'intent': intent,
|
||||
'project_id': matched_project.get('project_id') if matched_project else None,
|
||||
'project_name': matched_project.get('name') if matched_project else self._derive_name(prompt_text),
|
||||
'repo_name': None if matched_project else self._derive_repo_name(self._derive_name(prompt_text)),
|
||||
'issue_number': issue_number,
|
||||
'confidence': 'medium' if matched_project or explicit_new else 'low',
|
||||
'reasoning_summary': 'Heuristic routing from chat history and project names.',
|
||||
}
|
||||
|
||||
def _extract_issue_number(self, prompt_text: str) -> int | None:
|
||||
match = re.search(r'(?:#|issue\s+)(\d+)', prompt_text, flags=re.IGNORECASE)
|
||||
return int(match.group(1)) if match else None
|
||||
@@ -1,8 +1,6 @@
|
||||
"""Telegram bot integration for n8n webhook."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@@ -12,6 +10,59 @@ class TelegramHandler:
|
||||
def __init__(self, webhook_url: str):
|
||||
self.webhook_url = webhook_url
|
||||
self.api_url = "https://api.telegram.org/bot"
|
||||
|
||||
def build_prompt_guide_message(self, backend_url: str | None = None) -> str:
|
||||
"""Build a Telegram message explaining the expected prompt format."""
|
||||
lines = [
|
||||
"AI Software Factory is listening in this chat.",
|
||||
"",
|
||||
"You can send free-form software requests in normal language.",
|
||||
"",
|
||||
"Example:",
|
||||
"Build an internal inventory portal for our warehouse team.",
|
||||
"It should support role-based login, stock dashboards, and purchase orders.",
|
||||
"Prefer FastAPI, PostgreSQL, and a simple web UI.",
|
||||
"",
|
||||
"The backend will interpret the request and turn it into a structured project plan.",
|
||||
]
|
||||
if backend_url:
|
||||
lines.extend(["", f"Backend target: {backend_url}"])
|
||||
return "\n".join(lines)
|
||||
|
||||
async def send_message(self, bot_token: str, chat_id: str | int, text: str) -> dict:
|
||||
"""Send a direct Telegram message using the configured bot."""
|
||||
if not bot_token:
|
||||
return {"status": "error", "message": "Telegram bot token is not configured"}
|
||||
if chat_id in (None, ""):
|
||||
return {"status": "error", "message": "Telegram chat id is not configured"}
|
||||
|
||||
api_endpoint = f"{self.api_url}{bot_token}/sendMessage"
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(
|
||||
api_endpoint,
|
||||
json={
|
||||
"chat_id": str(chat_id),
|
||||
"text": text,
|
||||
},
|
||||
) as resp:
|
||||
payload = await resp.json()
|
||||
if 200 <= resp.status < 300 and payload.get("ok"):
|
||||
return {
|
||||
"status": "success",
|
||||
"message": "Telegram prompt guide sent successfully",
|
||||
"payload": payload,
|
||||
}
|
||||
description = payload.get("description") or payload.get("message") or str(payload)
|
||||
return {
|
||||
"status": "error",
|
||||
"message": f"Telegram API returned {resp.status}: {description}",
|
||||
"payload": payload,
|
||||
}
|
||||
except Exception as exc:
|
||||
return {"status": "error", "message": str(exc)}
|
||||
|
||||
async def handle_message(self, message_data: dict) -> dict:
|
||||
"""Handle incoming Telegram message."""
|
||||
|
||||
@@ -1,12 +1,210 @@
|
||||
"""Configuration settings for AI Software Factory."""
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import Optional
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
from pydantic import Field
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
def _normalize_service_url(value: str, default_scheme: str = "https") -> str:
    """Normalize service URLs so host-only values still become valid absolute URLs.

    Returns '' for empty input or values that do not parse into a
    scheme + host after normalization; trailing slashes are stripped.
    """
    candidate = (value or "").strip().rstrip("/")
    if not candidate:
        return ""
    # Host-only values ("gitea.local") get the default scheme prepended.
    if "://" not in candidate:
        candidate = f"{default_scheme}://{candidate}"
    parts = urlparse(candidate)
    return candidate if (parts.scheme and parts.netloc) else ""
# Registry of LLM prompt keys whose values may be overridden at runtime.
# Membership gates _get_persisted_llm_prompt_override: only keys listed
# here are looked up in the database. Each entry carries display metadata:
#   label       -> human-readable name for the prompt
#   category    -> 'guardrail' or 'system_prompt'
#   description -> where the prompt is applied
EDITABLE_LLM_PROMPTS: dict[str, dict[str, str]] = {
    'LLM_GUARDRAIL_PROMPT': {
        'label': 'Global Guardrails',
        'category': 'guardrail',
        'description': 'Applied to every outbound external LLM call.',
    },
    'LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT': {
        'label': 'Request Interpretation Guardrails',
        'category': 'guardrail',
        'description': 'Constrains project routing and continuation selection.',
    },
    'LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT': {
        'label': 'Change Summary Guardrails',
        'category': 'guardrail',
        'description': 'Constrains factual delivery summaries.',
    },
    'LLM_PROJECT_NAMING_GUARDRAIL_PROMPT': {
        'label': 'Project Naming Guardrails',
        'category': 'guardrail',
        'description': 'Constrains project display names and repo slugs.',
    },
    'LLM_PROJECT_NAMING_SYSTEM_PROMPT': {
        'label': 'Project Naming System Prompt',
        'category': 'system_prompt',
        'description': 'Guides the dedicated new-project naming stage.',
    },
    'LLM_PROJECT_ID_GUARDRAIL_PROMPT': {
        'label': 'Project ID Guardrails',
        'category': 'guardrail',
        'description': 'Constrains stable project id generation.',
    },
    'LLM_PROJECT_ID_SYSTEM_PROMPT': {
        'label': 'Project ID System Prompt',
        'category': 'system_prompt',
        'description': 'Guides the dedicated project id naming stage.',
    },
}
# Registry of runtime settings that may be overridden at runtime.
# Membership gates _get_persisted_runtime_setting_override, and each
# entry's 'value_type' ('string' | 'boolean' | 'integer' | 'float') drives
# coercion in _coerce_runtime_setting_value.
EDITABLE_RUNTIME_SETTINGS: dict[str, dict[str, str]] = {
    'HOME_ASSISTANT_BATTERY_ENTITY_ID': {
        'label': 'Battery Entity ID',
        'category': 'home_assistant',
        'description': 'Home Assistant entity used for battery state-of-charge gating.',
        'value_type': 'string',
    },
    'HOME_ASSISTANT_SURPLUS_ENTITY_ID': {
        'label': 'Surplus Power Entity ID',
        'category': 'home_assistant',
        'description': 'Home Assistant entity used for export or surplus power gating.',
        'value_type': 'string',
    },
    'HOME_ASSISTANT_BATTERY_FULL_THRESHOLD': {
        'label': 'Battery Full Threshold',
        'category': 'home_assistant',
        'description': 'Minimum battery percentage required before queued prompts may run.',
        'value_type': 'float',
    },
    'HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS': {
        'label': 'Surplus Threshold Watts',
        'category': 'home_assistant',
        'description': 'Minimum surplus/export power required before queued prompts may run.',
        'value_type': 'float',
    },
    'PROMPT_QUEUE_ENABLED': {
        'label': 'Queue Telegram Prompts',
        'category': 'prompt_queue',
        'description': 'When enabled, Telegram prompts are queued and gated instead of processed immediately.',
        'value_type': 'boolean',
    },
    'PROMPT_QUEUE_AUTO_PROCESS': {
        'label': 'Auto Process Queue',
        'category': 'prompt_queue',
        'description': 'Let the background worker drain the queue automatically when the gate is open.',
        'value_type': 'boolean',
    },
    'PROMPT_QUEUE_FORCE_PROCESS': {
        'label': 'Force Queue Processing',
        'category': 'prompt_queue',
        'description': 'Bypass the Home Assistant energy gate for queued prompts.',
        'value_type': 'boolean',
    },
    'PROMPT_QUEUE_POLL_INTERVAL_SECONDS': {
        'label': 'Queue Poll Interval Seconds',
        'category': 'prompt_queue',
        'description': 'Polling interval for the background queue worker.',
        'value_type': 'integer',
    },
    'PROMPT_QUEUE_MAX_BATCH_SIZE': {
        'label': 'Queue Max Batch Size',
        'category': 'prompt_queue',
        'description': 'Maximum number of queued prompts processed in one batch.',
        'value_type': 'integer',
    },
}
def _get_persisted_llm_prompt_override(env_key: str) -> str | None:
    """Load one persisted LLM prompt override from the database when available.

    Returns None when the key is not editable, the database layer is
    unavailable, or any error occurs — lookup is deliberately best-effort so
    settings resolution can always fall back to environment defaults.
    """
    if env_key not in EDITABLE_LLM_PROMPTS:
        return None
    try:
        # Support both package-relative and flat-module execution layouts.
        try:
            from .database import get_db_sync
            from .agents.database_manager import DatabaseManager
        except ImportError:
            from database import get_db_sync
            from agents.database_manager import DatabaseManager

        db = get_db_sync()
        if db is None:
            return None
        try:
            return DatabaseManager(db).get_llm_prompt_override(env_key)
        finally:
            # Always release the session, even when the lookup raises.
            db.close()
    except Exception:
        # Swallow all failures by design: overrides are optional.
        return None
def _resolve_llm_prompt_value(env_key: str, fallback: str) -> str:
    """Resolve one editable prompt from DB override first, then environment/defaults."""
    persisted = _get_persisted_llm_prompt_override(env_key)
    if persisted is None:
        # No database override: use the env/default value.
        return (fallback or '').strip()
    return persisted.strip()
def _get_persisted_runtime_setting_override(key: str):
    """Load one persisted runtime-setting override from the database when available.

    Mirrors _get_persisted_llm_prompt_override: returns None when the key is
    not editable, the database layer is unavailable, or any error occurs, so
    callers can always fall back to environment defaults.
    """
    if key not in EDITABLE_RUNTIME_SETTINGS:
        return None
    try:
        # Support both package-relative and flat-module execution layouts.
        try:
            from .database import get_db_sync
            from .agents.database_manager import DatabaseManager
        except ImportError:
            from database import get_db_sync
            from agents.database_manager import DatabaseManager

        db = get_db_sync()
        if db is None:
            return None
        try:
            return DatabaseManager(db).get_runtime_setting_override(key)
        finally:
            # Always release the session, even when the lookup raises.
            db.close()
    except Exception:
        # Swallow all failures by design: overrides are optional.
        return None
def _coerce_runtime_setting_value(key: str, value, fallback):
    """Convert a raw persisted override into the scalar type declared for *key*.

    Absent overrides yield *fallback* unchanged; unconvertible overrides fall
    back to *fallback* coerced to the declared type; keys with no declared
    (or an unknown) value type are returned as stripped strings.
    """
    if value is None:
        return fallback

    expected = EDITABLE_RUNTIME_SETTINGS.get(key, {}).get('value_type')

    if expected == 'boolean':
        if isinstance(value, bool):
            return value
        token = str(value).strip().lower()
        if token in {'1', 'true', 'yes', 'on'}:
            return True
        if token in {'0', 'false', 'no', 'off'}:
            return False
        # Unrecognized token: best-effort, mirror the configured default.
        return bool(fallback)

    if expected == 'integer':
        try:
            return int(value)
        except Exception:
            return int(fallback)

    if expected == 'float':
        try:
            return float(value)
        except Exception:
            return float(fallback)

    return str(value).strip()
|
||||
|
||||
|
||||
def _resolve_runtime_setting_value(key: str, fallback):
    """Return the effective value for runtime setting *key*.

    Any persisted database override is coerced to the declared scalar type;
    when no override exists the *fallback* (environment/default) is used.
    """
    persisted = _get_persisted_runtime_setting_override(key)
    return _coerce_runtime_setting_value(key, persisted, fallback)
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
"""Application settings loaded from environment variables."""
|
||||
|
||||
@@ -24,6 +222,34 @@ class Settings(BaseSettings):
|
||||
# Ollama settings computed from environment
|
||||
OLLAMA_URL: str = "http://ollama:11434"
|
||||
OLLAMA_MODEL: str = "llama3"
|
||||
LLM_GUARDRAIL_PROMPT: str = (
|
||||
"You are operating inside AI Software Factory. Follow the requested schema exactly, "
|
||||
"treat provided tool outputs as authoritative, and do not invent repositories, issues, pull requests, or delivery facts."
|
||||
)
|
||||
LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT: str = (
|
||||
"For routing and request interpretation: never select archived projects, prefer tracked project IDs from tool outputs, and only reference issues that are explicit in the prompt or available tool data."
|
||||
)
|
||||
LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT: str = (
|
||||
"For summaries: only describe facts present in the provided context and tool outputs. Never claim a repository, commit, or pull request exists unless it is present in the supplied data."
|
||||
)
|
||||
LLM_PROJECT_NAMING_GUARDRAIL_PROMPT: str = (
|
||||
"For project naming: prefer clear, product-like names and repository slugs that match the user's concrete deliverable. Avoid abstract or instructional words such as purpose, project, system, app, tool, platform, solution, new, create, or test unless the request truly centers on that exact noun. Base the name on the actual artifact or workflow being built, and avoid copying sentence fragments from the prompt. Avoid reusing tracked project identities unless the request is clearly asking for an existing project."
|
||||
)
|
||||
LLM_PROJECT_NAMING_SYSTEM_PROMPT: str = (
|
||||
"You name newly requested software projects. Return only JSON with keys project_name, repo_name, and rationale. Project names should be concise human-readable titles based on the real product, artifact, or workflow being created. Repo names should be lowercase kebab-case slugs derived from that title. Never return generic names like purpose, project, system, app, tool, platform, solution, harness, or test by themselves, and never return a repo_name that is a copied sentence fragment from the prompt. Prefer 2 to 4 specific words when possible."
|
||||
)
|
||||
LLM_PROJECT_ID_GUARDRAIL_PROMPT: str = (
|
||||
"For project ids: produce short stable slugs for newly created projects. Avoid collisions with known project ids and keep ids lowercase with hyphens."
|
||||
)
|
||||
LLM_PROJECT_ID_SYSTEM_PROMPT: str = (
|
||||
"You derive stable project ids for new projects. Return only JSON with keys project_id and rationale. project_id must be a short lowercase kebab-case slug without spaces."
|
||||
)
|
||||
LLM_TOOL_ALLOWLIST: str = "gitea_project_catalog,gitea_project_state,gitea_project_issues,gitea_pull_requests"
|
||||
LLM_TOOL_CONTEXT_LIMIT: int = 5
|
||||
LLM_LIVE_TOOL_ALLOWLIST: str = "gitea_lookup_issue,gitea_lookup_pull_request"
|
||||
LLM_LIVE_TOOL_STAGE_ALLOWLIST: str = "request_interpretation,change_summary"
|
||||
LLM_LIVE_TOOL_STAGE_TOOL_MAP: str = ""
|
||||
LLM_MAX_TOOL_CALL_ROUNDS: int = 1
|
||||
|
||||
# Gitea settings
|
||||
GITEA_URL: str = "https://gitea.yourserver.com"
|
||||
@@ -47,6 +273,19 @@ class Settings(BaseSettings):
|
||||
TELEGRAM_BOT_TOKEN: str = ""
|
||||
TELEGRAM_CHAT_ID: str = ""
|
||||
|
||||
# Home Assistant and prompt queue settings
|
||||
HOME_ASSISTANT_URL: str = ""
|
||||
HOME_ASSISTANT_TOKEN: str = ""
|
||||
HOME_ASSISTANT_BATTERY_ENTITY_ID: str = ""
|
||||
HOME_ASSISTANT_SURPLUS_ENTITY_ID: str = ""
|
||||
HOME_ASSISTANT_BATTERY_FULL_THRESHOLD: float = 95.0
|
||||
HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS: float = 100.0
|
||||
PROMPT_QUEUE_ENABLED: bool = False
|
||||
PROMPT_QUEUE_AUTO_PROCESS: bool = True
|
||||
PROMPT_QUEUE_FORCE_PROCESS: bool = False
|
||||
PROMPT_QUEUE_POLL_INTERVAL_SECONDS: int = 60
|
||||
PROMPT_QUEUE_MAX_BATCH_SIZE: int = 1
|
||||
|
||||
# PostgreSQL settings
|
||||
POSTGRES_HOST: str = "localhost"
|
||||
POSTGRES_PORT: int = 5432
|
||||
@@ -131,10 +370,138 @@ class Settings(BaseSettings):
|
||||
"""Get Ollama URL with trimmed whitespace."""
|
||||
return self.OLLAMA_URL.strip()
|
||||
|
||||
@property
|
||||
def llm_guardrail_prompt(self) -> str:
|
||||
"""Get the global guardrail prompt used for all external LLM calls."""
|
||||
return _resolve_llm_prompt_value('LLM_GUARDRAIL_PROMPT', self.LLM_GUARDRAIL_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_request_interpreter_guardrail_prompt(self) -> str:
|
||||
"""Get the request-interpretation specific guardrail prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT', self.LLM_REQUEST_INTERPRETER_GUARDRAIL_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_change_summary_guardrail_prompt(self) -> str:
|
||||
"""Get the change-summary specific guardrail prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT', self.LLM_CHANGE_SUMMARY_GUARDRAIL_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_project_naming_guardrail_prompt(self) -> str:
|
||||
"""Get the project-naming specific guardrail prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_PROJECT_NAMING_GUARDRAIL_PROMPT', self.LLM_PROJECT_NAMING_GUARDRAIL_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_project_naming_system_prompt(self) -> str:
|
||||
"""Get the project-naming system prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_PROJECT_NAMING_SYSTEM_PROMPT', self.LLM_PROJECT_NAMING_SYSTEM_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_project_id_guardrail_prompt(self) -> str:
|
||||
"""Get the project-id naming specific guardrail prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_PROJECT_ID_GUARDRAIL_PROMPT', self.LLM_PROJECT_ID_GUARDRAIL_PROMPT)
|
||||
|
||||
@property
|
||||
def llm_project_id_system_prompt(self) -> str:
|
||||
"""Get the project-id naming system prompt."""
|
||||
return _resolve_llm_prompt_value('LLM_PROJECT_ID_SYSTEM_PROMPT', self.LLM_PROJECT_ID_SYSTEM_PROMPT)
|
||||
|
||||
@property
def editable_llm_prompts(self) -> list[dict[str, str]]:
    """Describe every UI-editable LLM prompt.

    Each entry carries the env key, display metadata, the stripped
    environment/default value, and the effective (override-aware) value.
    """
    return [
        {
            'key': env_key,
            'label': meta['label'],
            'category': meta['category'],
            'description': meta['description'],
            'default_value': (getattr(self, env_key, '') or '').strip(),
            'value': _resolve_llm_prompt_value(env_key, getattr(self, env_key, '')),
        }
        for env_key, meta in EDITABLE_LLM_PROMPTS.items()
    ]
|
||||
|
||||
@property
def editable_runtime_settings(self) -> list[dict]:
    """Describe every DB-editable runtime setting.

    Each entry carries the key, display metadata, the declared value type,
    the configured default, and the effective (override-aware) value.
    """
    entries: list[dict] = []
    for setting_key, meta in EDITABLE_RUNTIME_SETTINGS.items():
        configured_default = getattr(self, setting_key)
        entries.append({
            'key': setting_key,
            'label': meta['label'],
            'category': meta['category'],
            'description': meta['description'],
            'value_type': meta['value_type'],
            'default_value': configured_default,
            'value': _resolve_runtime_setting_value(setting_key, configured_default),
        })
    return entries
|
||||
|
||||
@property
def llm_tool_allowlist(self) -> list[str]:
    """Split the comma-separated LLM_TOOL_ALLOWLIST into stripped, non-empty names."""
    names: list[str] = []
    for part in self.LLM_TOOL_ALLOWLIST.split(','):
        cleaned = part.strip()
        if cleaned:
            names.append(cleaned)
    return names
|
||||
|
||||
@property
def llm_tool_context_limit(self) -> int:
    """Number of items to expose per mediated tool payload, clamped to at least 1."""
    limit = int(self.LLM_TOOL_CONTEXT_LIMIT)
    return limit if limit >= 1 else 1
|
||||
|
||||
@property
def llm_live_tool_allowlist(self) -> list[str]:
    """Split LLM_LIVE_TOOL_ALLOWLIST into stripped, non-empty live tool names."""
    names: list[str] = []
    for part in self.LLM_LIVE_TOOL_ALLOWLIST.split(','):
        cleaned = part.strip()
        if cleaned:
            names.append(cleaned)
    return names
|
||||
|
||||
@property
def llm_live_tool_stage_allowlist(self) -> list[str]:
    """Split LLM_LIVE_TOOL_STAGE_ALLOWLIST into stripped, non-empty stage names."""
    stages: list[str] = []
    for part in self.LLM_LIVE_TOOL_STAGE_ALLOWLIST.split(','):
        cleaned = part.strip()
        if cleaned:
            stages.append(cleaned)
    return stages
|
||||
|
||||
@property
def llm_live_tool_stage_tool_map(self) -> dict[str, list[str]]:
    """Parse the optional JSON stage→tools map overriding the stage allowlist.

    Empty, unparsable, or non-dict input yields {}. Stage keys are stripped;
    each stage's tools are stringified, stripped, and filtered against the
    live-tool allowlist.
    """
    text = (self.LLM_LIVE_TOOL_STAGE_TOOL_MAP or '').strip()
    if not text:
        return {}
    try:
        mapping = json.loads(text)
    except Exception:
        return {}
    if not isinstance(mapping, dict):
        return {}
    permitted = set(self.llm_live_tool_allowlist)
    result: dict[str, list[str]] = {}
    for stage, tools in mapping.items():
        # Silently drop malformed entries rather than failing the whole map.
        if isinstance(stage, str) and isinstance(tools, list):
            result[stage.strip()] = [
                name for name in (str(tool).strip() for tool in tools)
                if name in permitted
            ]
    return result
|
||||
|
||||
def llm_live_tools_for_stage(self, stage: str) -> list[str]:
    """Return the live tool names enabled for a specific LLM *stage*.

    An explicit per-stage map (LLM_LIVE_TOOL_STAGE_TOOL_MAP) takes precedence
    when configured; otherwise the stage must appear in the stage allowlist to
    receive the full live-tool allowlist, and gets no tools if it does not.
    """
    stage_map = self.llm_live_tool_stage_tool_map
    if stage_map:
        return stage_map.get(stage, [])
    # Membership test directly on the list: building a throwaway set for a
    # single lookup costs more than it saves on these tiny allowlists.
    if stage not in self.llm_live_tool_stage_allowlist:
        return []
    return self.llm_live_tool_allowlist
|
||||
|
||||
@property
def llm_max_tool_call_rounds(self) -> int:
    """Maximum model-driven live tool-call rounds per LLM request, floored at 0."""
    rounds = int(self.LLM_MAX_TOOL_CALL_ROUNDS)
    return rounds if rounds > 0 else 0
|
||||
|
||||
@property
|
||||
def gitea_url(self) -> str:
|
||||
"""Get Gitea URL with trimmed whitespace."""
|
||||
return self.GITEA_URL.strip()
|
||||
return _normalize_service_url(self.GITEA_URL)
|
||||
|
||||
@property
|
||||
def gitea_token(self) -> str:
|
||||
@@ -159,12 +526,12 @@ class Settings(BaseSettings):
|
||||
@property
|
||||
def n8n_webhook_url(self) -> str:
|
||||
"""Get n8n webhook URL with trimmed whitespace."""
|
||||
return self.N8N_WEBHOOK_URL.strip()
|
||||
return _normalize_service_url(self.N8N_WEBHOOK_URL, default_scheme="http")
|
||||
|
||||
@property
|
||||
def n8n_api_url(self) -> str:
|
||||
"""Get n8n API URL with trimmed whitespace."""
|
||||
return self.N8N_API_URL.strip()
|
||||
return _normalize_service_url(self.N8N_API_URL, default_scheme="http")
|
||||
|
||||
@property
|
||||
def n8n_api_key(self) -> str:
|
||||
@@ -189,7 +556,62 @@ class Settings(BaseSettings):
|
||||
@property
|
||||
def backend_public_url(self) -> str:
|
||||
"""Get backend public URL with trimmed whitespace."""
|
||||
return self.BACKEND_PUBLIC_URL.strip().rstrip("/")
|
||||
return _normalize_service_url(self.BACKEND_PUBLIC_URL, default_scheme="http")
|
||||
|
||||
@property
|
||||
def home_assistant_url(self) -> str:
|
||||
"""Get Home Assistant URL with trimmed whitespace."""
|
||||
return _normalize_service_url(self.HOME_ASSISTANT_URL, default_scheme="http")
|
||||
|
||||
@property
|
||||
def home_assistant_token(self) -> str:
|
||||
"""Get Home Assistant token with trimmed whitespace."""
|
||||
return self.HOME_ASSISTANT_TOKEN.strip()
|
||||
|
||||
@property
|
||||
def home_assistant_battery_entity_id(self) -> str:
|
||||
"""Get the Home Assistant battery state entity id."""
|
||||
return str(_resolve_runtime_setting_value('HOME_ASSISTANT_BATTERY_ENTITY_ID', self.HOME_ASSISTANT_BATTERY_ENTITY_ID)).strip()
|
||||
|
||||
@property
|
||||
def home_assistant_surplus_entity_id(self) -> str:
|
||||
"""Get the Home Assistant surplus power entity id."""
|
||||
return str(_resolve_runtime_setting_value('HOME_ASSISTANT_SURPLUS_ENTITY_ID', self.HOME_ASSISTANT_SURPLUS_ENTITY_ID)).strip()
|
||||
|
||||
@property
|
||||
def home_assistant_battery_full_threshold(self) -> float:
|
||||
"""Get the minimum battery SoC percentage for queue processing."""
|
||||
return float(_resolve_runtime_setting_value('HOME_ASSISTANT_BATTERY_FULL_THRESHOLD', self.HOME_ASSISTANT_BATTERY_FULL_THRESHOLD))
|
||||
|
||||
@property
|
||||
def home_assistant_surplus_threshold_watts(self) -> float:
|
||||
"""Get the minimum export/surplus power threshold for queue processing."""
|
||||
return float(_resolve_runtime_setting_value('HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS', self.HOME_ASSISTANT_SURPLUS_THRESHOLD_WATTS))
|
||||
|
||||
@property
|
||||
def prompt_queue_enabled(self) -> bool:
|
||||
"""Whether Telegram prompts should be queued instead of processed immediately."""
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_ENABLED', self.PROMPT_QUEUE_ENABLED))
|
||||
|
||||
@property
|
||||
def prompt_queue_auto_process(self) -> bool:
|
||||
"""Whether the background worker should automatically process queued prompts."""
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_AUTO_PROCESS', self.PROMPT_QUEUE_AUTO_PROCESS))
|
||||
|
||||
@property
|
||||
def prompt_queue_force_process(self) -> bool:
|
||||
"""Whether queued prompts should bypass the Home Assistant energy gate."""
|
||||
return bool(_resolve_runtime_setting_value('PROMPT_QUEUE_FORCE_PROCESS', self.PROMPT_QUEUE_FORCE_PROCESS))
|
||||
|
||||
@property
|
||||
def prompt_queue_poll_interval_seconds(self) -> int:
|
||||
"""Get the queue polling interval for background processing."""
|
||||
return max(int(_resolve_runtime_setting_value('PROMPT_QUEUE_POLL_INTERVAL_SECONDS', self.PROMPT_QUEUE_POLL_INTERVAL_SECONDS)), 5)
|
||||
|
||||
@property
|
||||
def prompt_queue_max_batch_size(self) -> int:
|
||||
"""Get the maximum number of queued prompts to process in one batch."""
|
||||
return max(int(_resolve_runtime_setting_value('PROMPT_QUEUE_MAX_BATCH_SIZE', self.PROMPT_QUEUE_MAX_BATCH_SIZE)), 1)
|
||||
|
||||
@property
|
||||
def projects_root(self) -> Path:
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -6,7 +6,7 @@ from urllib.parse import urlparse
|
||||
|
||||
from alembic import command
|
||||
from alembic.config import Config
|
||||
from sqlalchemy import create_engine, event, text
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
|
||||
@@ -64,20 +64,6 @@ def get_engine() -> Engine:
|
||||
pool_timeout=settings.DB_POOL_TIMEOUT or 30
|
||||
)
|
||||
|
||||
# Event listener for connection checkout (PostgreSQL only)
|
||||
if not settings.use_sqlite:
|
||||
@event.listens_for(engine, "checkout")
|
||||
def receive_checkout(dbapi_connection, connection_record, connection_proxy):
|
||||
"""Log connection checkout for audit purposes."""
|
||||
if settings.LOG_LEVEL in ("DEBUG", "INFO"):
|
||||
print(f"DB Connection checked out from pool")
|
||||
|
||||
@event.listens_for(engine, "checkin")
|
||||
def receive_checkin(dbapi_connection, connection_record):
|
||||
"""Log connection checkin for audit purposes."""
|
||||
if settings.LOG_LEVEL == "DEBUG":
|
||||
print(f"DB Connection returned to pool")
|
||||
|
||||
return engine
|
||||
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ def init(fastapi_app: FastAPI, storage_secret: str = 'Secr2t!') -> None:
|
||||
"""
|
||||
|
||||
def render_dashboard_page() -> None:
|
||||
ui.page_title('AI Software Factory')
|
||||
create_dashboard()
|
||||
|
||||
# NOTE dark mode will be persistent for each user across tabs and server restarts
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user