6 Commits
0.6.2 ... 0.6.5

Author SHA1 Message Date
032139c14f release: version 0.6.5 🚀
All checks were successful
Upload Python Package / Create Release (push) Successful in 1m18s
Upload Python Package / deploy (push) Successful in 3m11s
2026-04-10 22:13:40 +02:00
194d5658a6 fix: better n8n workflow, refs NOISSUE 2026-04-10 22:13:33 +02:00
b9faac8d16 release: version 0.6.4 🚀
All checks were successful
Upload Python Package / Create Release (push) Successful in 52s
Upload Python Package / deploy (push) Successful in 2m14s
2026-04-10 21:47:54 +02:00
80d7716e65 fix: add Telegram helper functions, refs NOISSUE 2026-04-10 21:47:50 +02:00
321bf74aef release: version 0.6.3 🚀
All checks were successful
Upload Python Package / Create Release (push) Successful in 15s
Upload Python Package / deploy (push) Successful in 1m11s
2026-04-10 21:24:44 +02:00
55ee75106c fix: n8n workflow generation, refs NOISSUE 2026-04-10 21:24:39 +02:00
8 changed files with 398 additions and 69 deletions

View File

@@ -5,10 +5,43 @@ Changelog
(unreleased)
------------
Fix
~~~
- Better n8n workflow, refs NOISSUE. [Simon Diesenreiter]
0.6.4 (2026-04-10)
------------------
Fix
~~~
- Add Telegram helper functions, refs NOISSUE. [Simon Diesenreiter]
Other
~~~~~
0.6.3 (2026-04-10)
------------------
Fix
~~~
- N8n workflow generation, refs NOISSUE. [Simon Diesenreiter]
Other
~~~~~
0.6.2 (2026-04-10)
------------------
Fix
~~~
- Fix Quasar layout issues, refs NOISSUE. [Simon Diesenreiter]
Other
~~~~~
0.6.1 (2026-04-10)
------------------

View File

@@ -86,11 +86,14 @@ docker-compose up -d
1. **Send a request via Telegram:**
```
Name: My Awesome App
Description: A web application for managing tasks
Features: user authentication, task CRUD, notifications
Build an internal task management app for our operations team.
It should support user authentication, task CRUD, notifications, and reporting.
Prefer FastAPI with PostgreSQL and a simple web dashboard.
```
The backend now interprets free-form Telegram text with Ollama before generation.
If `TELEGRAM_CHAT_ID` is set, the Telegram-trigger workflow only reacts to messages from that specific chat.
2. **Monitor progress via Web UI:**
Open `http://yourserver:8000/` to see the dashboard and `http://yourserver:8000/api` for API metadata.
@@ -109,6 +112,7 @@ If you deploy the container with PostgreSQL environment variables set, the servi
| `/api` | GET | API information |
| `/health` | GET | Health check |
| `/generate` | POST | Generate new software |
| `/generate/text` | POST | Interpret free-form text and generate software |
| `/status/{project_id}` | GET | Get project status |
| `/projects` | GET | List all projects |

View File

@@ -1 +1 @@
0.6.2
0.6.5

View File

@@ -220,11 +220,31 @@ class N8NSetupAgent:
async def create_workflow(self, workflow_json: dict) -> dict:
"""Create or update a workflow."""
return await self._request("POST", "workflows", json=workflow_json)
return await self._request("POST", "workflows", json=self._workflow_payload(workflow_json))
def _workflow_payload(self, workflow_json: dict) -> dict:
"""Return a workflow payload without server-managed read-only fields."""
payload = dict(workflow_json)
payload.pop("active", None)
payload.pop("id", None)
payload.pop("createdAt", None)
payload.pop("updatedAt", None)
payload.pop("versionId", None)
return payload
async def _update_workflow_via_put(self, workflow_id: str, workflow_json: dict) -> dict:
    """Fallback update path for n8n instances that only support PUT."""
    # Same sanitized body as PATCH updates, just sent via PUT.
    endpoint = f"workflows/{workflow_id}"
    body = self._workflow_payload(workflow_json)
    return await self._request("PUT", endpoint, json=body)
async def update_workflow(self, workflow_id: str, workflow_json: dict) -> dict:
"""Update an existing workflow."""
return await self._request("PATCH", f"workflows/{workflow_id}", json=workflow_json)
result = await self._request("PATCH", f"workflows/{workflow_id}", json=self._workflow_payload(workflow_json))
if result.get("status_code") == 405:
fallback = await self._update_workflow_via_put(workflow_id, workflow_json)
if not fallback.get("error") and isinstance(fallback, dict):
fallback.setdefault("method", "PUT")
return fallback
return result
async def enable_workflow(self, workflow_id: str) -> dict:
"""Enable a workflow."""
@@ -232,6 +252,11 @@ class N8NSetupAgent:
if result.get("error"):
fallback = await self._request("PATCH", f"workflows/{workflow_id}", json={"active": True})
if fallback.get("error"):
if fallback.get("status_code") == 405:
put_fallback = await self._request("PUT", f"workflows/{workflow_id}", json={"active": True})
if put_fallback.get("error"):
return put_fallback
return {"success": True, "id": workflow_id, "method": "put"}
return fallback
return {"success": True, "id": workflow_id, "method": "patch"}
return {"success": True, "id": workflow_id, "method": "activate"}
@@ -250,12 +275,12 @@ class N8NSetupAgent:
return value
return []
def build_telegram_workflow(self, webhook_path: str, backend_url: str) -> dict:
def build_telegram_workflow(self, webhook_path: str, backend_url: str, allowed_chat_id: str | None = None) -> dict:
"""Build the Telegram-to-backend workflow definition."""
normalized_path = webhook_path.strip().strip("/") or "telegram"
allowed_chat = json.dumps(str(allowed_chat_id)) if allowed_chat_id else "''"
return {
"name": "Telegram to AI Software Factory",
"active": False,
"settings": {"executionOrder": "v1"},
"nodes": [
{
@@ -273,13 +298,13 @@ class N8NSetupAgent:
},
{
"id": "parse-node",
"name": "Prepare Software Request",
"name": "Prepare Freeform Request",
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [-200, 120],
"parameters": {
"language": "javaScript",
"jsCode": "const body = $json.body ?? $json;\nconst message = body.message ?? body;\nconst text = String(message.text ?? '').trim();\nconst lines = text.split(/\\r?\\n/);\nconst request = { name: null, description: '', features: [], tech_stack: [] };\nlet nameIndex = -1;\nlet featuresIndex = -1;\nlet techIndex = -1;\nfor (let i = 0; i < lines.length; i += 1) {\n const line = lines[i].trim();\n if (line.toLowerCase().startsWith('name:')) { request.name = line.split(':', 2)[1]?.trim() || null; nameIndex = i; }\n if (line.toLowerCase().startsWith('features:') && featuresIndex === -1) { featuresIndex = i; }\n if (line.toLowerCase().startsWith('tech stack:') && techIndex === -1) { techIndex = i; }\n}\nif (nameIndex >= 0) {\n const descriptionEnd = featuresIndex >= 0 ? featuresIndex : (techIndex >= 0 ? techIndex : lines.length);\n request.description = lines.slice(nameIndex + 1, descriptionEnd).join('\\n').replace(/^description:\\s*/i, '').trim();\n}\nfunction collectList(startIndex, fieldName) {\n if (startIndex < 0) return;\n const firstLine = lines[startIndex].split(':').slice(1).join(':').trim();\n if (firstLine && !firstLine.startsWith('-') && !firstLine.startsWith('*')) {\n request[fieldName].push(...firstLine.split(',').map(item => item.trim()).filter(Boolean));\n }\n for (const rawLine of lines.slice(startIndex + 1)) {\n const line = rawLine.trim();\n if (!line) continue;\n if (/^[A-Za-z ]+:/.test(line)) break;\n if (line.startsWith('-') || line.startsWith('*')) {\n const value = line.slice(1).trim();\n if (value) request[fieldName].push(value);\n }\n }\n}\ncollectList(featuresIndex, 'features');\ncollectList(techIndex, 'tech_stack');\nif (!request.name || request.features.length === 0) { throw new Error('Could not parse software request from Telegram message'); }\nreturn [{ json: { ...request, _source: { raw_text: text, chat_id: message.chat?.id ?? null } } }];",
"jsCode": f"const allowedChatId = {allowed_chat};\nconst body = $json.body ?? $json;\nconst message = body.message ?? body;\nconst text = String(message.text ?? '').trim();\nconst chatId = String(message.chat?.id ?? '');\nif (allowedChatId && chatId !== allowedChatId) {{\n return [{{ json: {{ ignored: true, message: `Ignoring message from chat ${{chatId}}`, prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];\n}}\nreturn [{{ json: {{ prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];",
},
},
{
@@ -310,8 +335,8 @@ class N8NSetupAgent:
},
],
"connections": {
"Telegram Webhook": {"main": [[{"node": "Prepare Software Request", "type": "main", "index": 0}]]},
"Prepare Software Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
"Telegram Webhook": {"main": [[{"node": "Prepare Freeform Request", "type": "main", "index": 0}]]},
"Prepare Freeform Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
"AI Software Factory API": {"main": [[{"node": "Respond to Telegram Webhook", "type": "main", "index": 0}]]},
},
}
@@ -320,11 +345,12 @@ class N8NSetupAgent:
self,
backend_url: str,
credential_name: str,
allowed_chat_id: str | None = None,
) -> dict:
"""Build a production Telegram Trigger based workflow."""
allowed_chat = json.dumps(str(allowed_chat_id)) if allowed_chat_id else "''"
return {
"name": "Telegram to AI Software Factory",
"active": False,
"settings": {"executionOrder": "v1"},
"nodes": [
{
@@ -337,14 +363,14 @@ class N8NSetupAgent:
"credentials": {"telegramApi": {"name": credential_name}},
},
{
"id": "parse-node",
"name": "Prepare Software Request",
"id": "filter-node",
"name": "Prepare Freeform Request",
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [-180, 120],
"parameters": {
"language": "javaScript",
"jsCode": "const message = $json.message ?? $json;\nconst text = String(message.text ?? '').trim();\nconst lines = text.split(/\\r?\\n/);\nconst request = { name: null, description: '', features: [], tech_stack: [], _source: { raw_text: text, chat_id: message.chat?.id ?? null } };\nlet nameIndex = -1;\nlet featuresIndex = -1;\nlet techIndex = -1;\nfor (let i = 0; i < lines.length; i += 1) {\n const line = lines[i].trim();\n if (line.toLowerCase().startsWith('name:')) { request.name = line.split(':', 2)[1]?.trim() || null; nameIndex = i; }\n if (line.toLowerCase().startsWith('features:') && featuresIndex === -1) { featuresIndex = i; }\n if (line.toLowerCase().startsWith('tech stack:') && techIndex === -1) { techIndex = i; }\n}\nif (nameIndex >= 0) {\n const descriptionEnd = featuresIndex >= 0 ? featuresIndex : (techIndex >= 0 ? techIndex : lines.length);\n request.description = lines.slice(nameIndex + 1, descriptionEnd).join('\\n').replace(/^description:\\s*/i, '').trim();\n}\nfunction collectList(startIndex, fieldName) {\n if (startIndex < 0) return;\n const firstLine = lines[startIndex].split(':').slice(1).join(':').trim();\n if (firstLine && !firstLine.startsWith('-') && !firstLine.startsWith('*')) {\n request[fieldName].push(...firstLine.split(',').map(item => item.trim()).filter(Boolean));\n }\n for (const rawLine of lines.slice(startIndex + 1)) {\n const line = rawLine.trim();\n if (!line) continue;\n if (/^[A-Za-z ]+:/.test(line)) break;\n if (line.startsWith('-') || line.startsWith('*')) {\n const value = line.slice(1).trim();\n if (value) request[fieldName].push(value);\n }\n }\n}\ncollectList(featuresIndex, 'features');\ncollectList(techIndex, 'tech_stack');\nif (!request.name || request.features.length === 0) { throw new Error('Could not parse software request from Telegram message'); }\nreturn [{ json: request }];",
"jsCode": f"const allowedChatId = {allowed_chat};\nconst message = $json.message ?? $json;\nconst text = String(message.text ?? '').trim();\nconst chatId = String(message.chat?.id ?? '');\nif (!text) return [];\nif (allowedChatId && chatId !== allowedChatId) return [];\nreturn [{{ json: {{ prompt_text: text, source: 'telegram', chat_id: chatId, chat_type: message.chat?.type ?? null }} }}];",
},
},
{
@@ -378,8 +404,8 @@ class N8NSetupAgent:
},
],
"connections": {
"Telegram Trigger": {"main": [[{"node": "Prepare Software Request", "type": "main", "index": 0}]]},
"Prepare Software Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
"Telegram Trigger": {"main": [[{"node": "Prepare Freeform Request", "type": "main", "index": 0}]]},
"Prepare Freeform Request": {"main": [[{"node": "AI Software Factory API", "type": "main", "index": 0}]]},
"AI Software Factory API": {"main": [[{"node": "Send Telegram Update", "type": "main", "index": 0}]]},
},
}
@@ -433,7 +459,7 @@ class N8NSetupAgent:
"""
return await self.setup(
webhook_path=webhook_path,
backend_url=f"{settings.backend_public_url}/generate",
backend_url=f"{settings.backend_public_url}/generate/text",
force_update=False,
)
@@ -470,7 +496,7 @@ class N8NSetupAgent:
"suggestion": health.get("suggestion"),
}
effective_backend_url = backend_url or f"{settings.backend_public_url}/generate"
effective_backend_url = backend_url or f"{settings.backend_public_url}/generate/text"
effective_bot_token = telegram_bot_token or settings.telegram_bot_token
effective_credential_name = telegram_credential_name or settings.n8n_telegram_credential_name
trigger_mode = use_telegram_trigger if use_telegram_trigger is not None else bool(effective_bot_token)
@@ -482,11 +508,13 @@ class N8NSetupAgent:
workflow = self.build_telegram_trigger_workflow(
backend_url=effective_backend_url,
credential_name=effective_credential_name,
allowed_chat_id=settings.telegram_chat_id,
)
else:
workflow = self.build_telegram_workflow(
webhook_path=webhook_path,
backend_url=effective_backend_url,
allowed_chat_id=settings.telegram_chat_id,
)
existing = await self.get_workflow(workflow["name"])

View File

@@ -0,0 +1,105 @@
"""Interpret free-form software requests into structured generation input."""
from __future__ import annotations
import json
import re
try:
from ..config import settings
except ImportError:
from config import settings
class RequestInterpreter:
"""Use Ollama to turn free-form text into a structured software request."""
def __init__(self, ollama_url: str | None = None, model: str | None = None):
self.ollama_url = (ollama_url or settings.ollama_url).rstrip('/')
self.model = model or settings.OLLAMA_MODEL
async def interpret(self, prompt_text: str) -> dict:
"""Interpret free-form text into the request shape expected by the orchestrator."""
normalized = prompt_text.strip()
if not normalized:
raise ValueError('Prompt text cannot be empty')
try:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.post(
f'{self.ollama_url}/api/chat',
json={
'model': self.model,
'stream': False,
'format': 'json',
'messages': [
{
'role': 'system',
'content': (
'You extract structured software requests. '
'Return only JSON with keys name, description, features, tech_stack. '
'name and description must be concise strings. '
'features and tech_stack must be arrays of strings. '
'Infer missing details from the user request instead of leaving arrays empty when possible.'
),
},
{'role': 'user', 'content': normalized},
],
},
) as resp:
payload = await resp.json()
if 200 <= resp.status < 300:
content = payload.get('message', {}).get('content', '')
if content:
return self._normalize_interpreted_request(json.loads(content), normalized)
except Exception:
pass
return self._heuristic_fallback(normalized)
def _normalize_interpreted_request(self, interpreted: dict, original_prompt: str) -> dict:
"""Normalize LLM output into the required request shape."""
name = str(interpreted.get('name') or '').strip() or self._derive_name(original_prompt)
description = str(interpreted.get('description') or '').strip() or original_prompt[:255]
features = self._normalize_list(interpreted.get('features'))
tech_stack = self._normalize_list(interpreted.get('tech_stack'))
if not features:
features = ['core workflow based on free-form request']
return {
'name': name[:255],
'description': description[:255],
'features': features,
'tech_stack': tech_stack,
}
def _normalize_list(self, value) -> list[str]:
if isinstance(value, list):
return [str(item).strip() for item in value if str(item).strip()]
if isinstance(value, str) and value.strip():
return [item.strip() for item in value.split(',') if item.strip()]
return []
def _derive_name(self, prompt_text: str) -> str:
"""Derive a stable project name when the LLM does not provide one."""
first_line = prompt_text.splitlines()[0].strip()
cleaned = re.sub(r'[^A-Za-z0-9 ]+', ' ', first_line)
words = [word.capitalize() for word in cleaned.split()[:4]]
return ' '.join(words) or 'Generated Project'
def _heuristic_fallback(self, prompt_text: str) -> dict:
"""Fallback request extraction when Ollama is unavailable."""
lowered = prompt_text.lower()
tech_candidates = [
'python', 'fastapi', 'django', 'flask', 'postgresql', 'sqlite', 'react', 'vue', 'nicegui', 'docker'
]
tech_stack = [candidate for candidate in tech_candidates if candidate in lowered]
sentences = [part.strip() for part in re.split(r'[\n\.]+', prompt_text) if part.strip()]
features = sentences[:3] or ['Implement the user request from free-form text']
return {
'name': self._derive_name(prompt_text),
'description': sentences[0][:255] if sentences else prompt_text[:255],
'features': features,
'tech_stack': tech_stack,
}

View File

@@ -1,8 +1,6 @@
"""Telegram bot integration for n8n webhook."""
import asyncio
import json
import re
from typing import Optional
@@ -13,6 +11,59 @@ class TelegramHandler:
self.webhook_url = webhook_url
self.api_url = "https://api.telegram.org/bot"
def build_prompt_guide_message(self, backend_url: str | None = None) -> str:
"""Build a Telegram message explaining the expected prompt format."""
lines = [
"AI Software Factory is listening in this chat.",
"",
"You can send free-form software requests in normal language.",
"",
"Example:",
"Build an internal inventory portal for our warehouse team.",
"It should support role-based login, stock dashboards, and purchase orders.",
"Prefer FastAPI, PostgreSQL, and a simple web UI.",
"",
"The backend will interpret the request and turn it into a structured project plan.",
]
if backend_url:
lines.extend(["", f"Backend target: {backend_url}"])
return "\n".join(lines)
async def send_message(self, bot_token: str, chat_id: str | int, text: str) -> dict:
"""Send a direct Telegram message using the configured bot."""
if not bot_token:
return {"status": "error", "message": "Telegram bot token is not configured"}
if chat_id in (None, ""):
return {"status": "error", "message": "Telegram chat id is not configured"}
api_endpoint = f"{self.api_url}{bot_token}/sendMessage"
try:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.post(
api_endpoint,
json={
"chat_id": str(chat_id),
"text": text,
},
) as resp:
payload = await resp.json()
if 200 <= resp.status < 300 and payload.get("ok"):
return {
"status": "success",
"message": "Telegram prompt guide sent successfully",
"payload": payload,
}
description = payload.get("description") or payload.get("message") or str(payload)
return {
"status": "error",
"message": f"Telegram API returned {resp.status}: {description}",
"payload": payload,
}
except Exception as exc:
return {"status": "error", "message": str(exc)}
async def handle_message(self, message_data: dict) -> dict:
"""Handle incoming Telegram message."""
text = message_data.get("text", "")

View File

@@ -5,16 +5,18 @@ from __future__ import annotations
from contextlib import closing
from html import escape
from nicegui import ui
from nicegui import app, ui
try:
from .agents.database_manager import DatabaseManager
from .agents.n8n_setup import N8NSetupAgent
from .agents.telegram import TelegramHandler
from .config import settings
from .database import get_database_runtime_summary, get_db_sync, init_db
except ImportError:
from agents.database_manager import DatabaseManager
from agents.n8n_setup import N8NSetupAgent
from agents.telegram import TelegramHandler
from config import settings
from database import get_database_runtime_summary, get_db_sync, init_db
@@ -165,6 +167,7 @@ def _render_health_panels() -> None:
('API URL', n8n_health.get('api_url') or 'Not configured'),
('Auth Configured', 'yes' if n8n_health.get('auth_configured') else 'no'),
('Checked Via', n8n_health.get('checked_via') or 'none'),
('Telegram Chat ID', settings.telegram_chat_id or 'Not configured'),
]
if n8n_health.get('workflow_count') is not None:
rows.append(('Workflow Count', str(n8n_health['workflow_count'])))
@@ -204,6 +207,15 @@ def create_health_page() -> None:
def create_dashboard():
"""Create the main NiceGUI dashboard."""
_add_dashboard_styles()
# Per-user storage key under which the active dashboard tab is remembered.
active_tab_key = 'dashboard.active_tab'

def _selected_tab_name() -> str:
    """Return the persisted active dashboard tab."""
    # Defaults to 'overview' on first visit or when user storage was cleared.
    return app.storage.user.get(active_tab_key, 'overview')

def _store_selected_tab(event) -> None:
    """Persist the active dashboard tab across refreshes."""
    # event.value carries the selected tab's name prop; guard against empty events.
    app.storage.user[active_tab_key] = event.value or 'overview'
async def setup_n8n_workflow_action() -> None:
api_url = _resolve_n8n_api_url()
@@ -214,7 +226,7 @@ def create_dashboard():
agent = N8NSetupAgent(api_url=api_url, webhook_token=settings.n8n_api_key)
result = await agent.setup(
webhook_path='telegram',
backend_url=f'{settings.backend_public_url}/generate',
backend_url=f'{settings.backend_public_url}/generate/text',
force_update=True,
)
@@ -232,6 +244,34 @@ def create_dashboard():
ui.notify(result.get('message', 'n8n setup finished'), color='positive' if result.get('status') == 'success' else 'negative')
dashboard_body.refresh()
async def send_telegram_prompt_guide_action() -> None:
    """Send the prompt-format guide into the configured Telegram chat and log the outcome."""
    # Both the bot token and a target chat id are required to message Telegram.
    if not settings.telegram_bot_token:
        ui.notify('Configure TELEGRAM_BOT_TOKEN first', color='negative')
        return
    if not settings.telegram_chat_id:
        ui.notify('Configure TELEGRAM_CHAT_ID to message the prompt channel', color='negative')
        return
    handler = TelegramHandler(settings.n8n_webhook_url or _resolve_n8n_api_url())
    message = handler.build_prompt_guide_message(settings.backend_public_url)
    result = await handler.send_message(
        bot_token=settings.telegram_bot_token,
        chat_id=settings.telegram_chat_id,
        text=message,
    )
    # Record the attempt in the system log when a database is available.
    db = get_db_sync()
    if db is not None:
        with closing(db):
            DatabaseManager(db).log_system_event(
                component='telegram',
                level='INFO' if result.get('status') == 'success' else 'ERROR',
                message=result.get('message', str(result)),
            )
    ui.notify(result.get('message', 'Telegram message sent'), color='positive' if result.get('status') == 'success' else 'negative')
    dashboard_body.refresh()
def init_db_action() -> None:
result = init_db()
ui.notify(result.get('message', 'Database initialized'), color='positive' if result.get('status') == 'success' else 'negative')
@@ -270,6 +310,7 @@ def create_dashboard():
ui.button('Refresh', on_click=dashboard_body.refresh).props('outline')
ui.button('Initialize DB', on_click=init_db_action).props('unelevated color=dark')
ui.button('Provision n8n Workflow', on_click=setup_n8n_workflow_action).props('unelevated color=accent')
ui.button('Message Prompt Channel', on_click=send_telegram_prompt_guide_action).props('outline color=secondary')
with ui.grid(columns=4).classes('w-full gap-4'):
metrics = [
@@ -284,15 +325,16 @@ def create_dashboard():
ui.label(str(value)).style('font-size: 2.1rem; font-weight: 800; margin-top: 6px;')
ui.label(subtitle).style('font-size: 0.9rem; opacity: 0.78; margin-top: 8px;')
with ui.tabs().classes('w-full') as tabs:
overview_tab = ui.tab('Overview')
projects_tab = ui.tab('Projects')
trace_tab = ui.tab('Prompt Trace')
system_tab = ui.tab('System')
health_tab = ui.tab('Health')
selected_tab = _selected_tab_name()
with ui.tabs(value=selected_tab, on_change=_store_selected_tab).classes('w-full') as tabs:
ui.tab('Overview').props('name=overview')
ui.tab('Projects').props('name=projects')
ui.tab('Prompt Trace').props('name=trace')
ui.tab('System').props('name=system')
ui.tab('Health').props('name=health')
with ui.tab_panels(tabs, value=overview_tab).classes('w-full'):
with ui.tab_panel(overview_tab):
with ui.tab_panels(tabs, value=selected_tab).classes('w-full'):
with ui.tab_panel('overview'):
with ui.grid(columns=2).classes('w-full gap-4'):
with ui.card().classes('factory-panel q-pa-lg'):
ui.label('Project Pipeline').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
@@ -322,7 +364,7 @@ def create_dashboard():
ui.label(label).classes('factory-muted')
ui.label(value).style('font-weight: 600; color: #3a281a;')
with ui.tab_panel(projects_tab):
with ui.tab_panel('projects'):
if not projects:
with ui.card().classes('factory-panel q-pa-lg'):
ui.label('No project data available yet.').classes('factory-muted')
@@ -376,7 +418,7 @@ def create_dashboard():
else:
ui.label('No audit events yet.').classes('factory-muted')
with ui.tab_panel(trace_tab):
with ui.tab_panel('trace'):
with ui.card().classes('factory-panel q-pa-lg'):
ui.label('Prompt to Code Correlation').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
ui.label('Each prompt entry is linked to the generated files recorded after that prompt for the same project.').classes('factory-muted')
@@ -397,7 +439,7 @@ def create_dashboard():
else:
ui.label('No prompt traces recorded yet.').classes('factory-muted')
with ui.tab_panel(system_tab):
with ui.tab_panel('system'):
with ui.grid(columns=2).classes('w-full gap-4'):
with ui.card().classes('factory-panel q-pa-lg'):
ui.label('System Logs').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
@@ -423,11 +465,19 @@ def create_dashboard():
for endpoint in endpoints:
ui.label(endpoint).classes('factory-code q-mt-sm')
with ui.tab_panel(health_tab):
with ui.tab_panel('health'):
with ui.card().classes('factory-panel q-pa-lg q-mb-md'):
ui.label('Health and Diagnostics').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
ui.label('Use this page to verify runtime configuration, n8n API connectivity, and likely causes of provisioning failures.').classes('factory-muted')
ui.link('Open dedicated health page', '/health-ui')
with ui.card().classes('factory-panel q-pa-lg q-mb-md'):
ui.label('Telegram Prompt Channel').style('font-size: 1.25rem; font-weight: 700; color: #3a281a;')
ui.label('Send a guide message into the same Telegram chat/channel where the bot is expected to receive prompts.').classes('factory-muted')
with ui.row().classes('justify-between w-full q-mt-sm'):
ui.label('Configured Chat ID').classes('factory-muted')
ui.label(settings.telegram_chat_id or 'Not configured').style('font-weight: 600; color: #3a281a;')
with ui.row().classes('items-center gap-2 q-mt-md'):
ui.button('Send Prompt Guide', on_click=send_telegram_prompt_guide_action).props('unelevated color=secondary')
_render_health_panels()
dashboard_body()

View File

@@ -28,6 +28,7 @@ try:
from . import __version__, frontend
from . import database as database_module
from .agents.database_manager import DatabaseManager
from .agents.request_interpreter import RequestInterpreter
from .agents.orchestrator import AgentOrchestrator
from .agents.n8n_setup import N8NSetupAgent
from .agents.ui_manager import UIManager
@@ -36,6 +37,7 @@ except ImportError:
import frontend
import database as database_module
from agents.database_manager import DatabaseManager
from agents.request_interpreter import RequestInterpreter
from agents.orchestrator import AgentOrchestrator
from agents.n8n_setup import N8NSetupAgent
from agents.ui_manager import UIManager
@@ -79,6 +81,15 @@ class N8NSetupRequest(BaseModel):
force_update: bool = False
class FreeformSoftwareRequest(BaseModel):
    """Request body for free-form software generation.

    Carries the raw prompt plus optional Telegram routing metadata so the
    backend can filter by originating chat before running generation.
    """

    # Raw natural-language request text; must be non-empty.
    prompt_text: str = Field(min_length=1)
    # Origin of the request; 'telegram' enables chat-id filtering downstream.
    source: str = 'telegram'
    # Telegram chat/channel that produced the prompt, if any.
    chat_id: str | None = None
    # Telegram chat type as reported by the webhook — TODO confirm exact values.
    chat_type: str | None = None
def _build_project_id(name: str) -> str:
"""Create a stable project id from the requested name."""
slug = PROJECT_ID_PATTERN.sub("-", name.strip().lower()).strip("-") or "project"
@@ -144,6 +155,49 @@ def _compose_prompt_text(request: SoftwareRequest) -> str:
)
async def _run_generation(
    request: SoftwareRequest,
    db: Session,
    prompt_text: str | None = None,
    prompt_actor: str = 'api',
) -> dict:
    """Run the shared generation pipeline for a structured request.

    Args:
        request: Structured software request (name/description/features/tech_stack).
        db: Active database session used by the orchestrator and logging.
        prompt_text: Original prompt to record; derived from *request* when omitted.
        prompt_actor: Who initiated the prompt (e.g. 'api', 'telegram').

    Returns:
        A dict with the run 'status' and a 'data' payload combining the stored
        project record, its logs, and the orchestrator's artifacts.
    """
    # Ensure the schema exists before the orchestrator touches the database.
    database_module.init_db()
    project_id = _build_project_id(request.name)
    resolved_prompt_text = prompt_text or _compose_prompt_text(request)
    orchestrator = AgentOrchestrator(
        project_id=project_id,
        project_name=request.name,
        description=request.description,
        features=request.features,
        tech_stack=request.tech_stack,
        db=db,
        prompt_text=resolved_prompt_text,
        prompt_actor=prompt_actor,
    )
    result = await orchestrator.run()
    manager = DatabaseManager(db)
    manager.log_system_event(
        component='api',
        level='INFO' if result['status'] == 'completed' else 'ERROR',
        message=f"Generated project {project_id} with {len(result.get('changed_files', []))} artifact(s)",
    )
    # Re-read the stored project so the response reflects persisted state.
    history = manager.get_project_by_id(project_id)
    project_logs = manager.get_project_logs(history.id)
    response_data = _serialize_project(history)
    response_data['logs'] = [_serialize_project_log(log) for log in project_logs]
    response_data['ui_data'] = result.get('ui_data')
    response_data['features'] = request.features
    response_data['tech_stack'] = request.tech_stack
    response_data['project_root'] = result.get('project_root', str(_project_root(project_id)))
    response_data['changed_files'] = result.get('changed_files', [])
    response_data['repository'] = result.get('repository')
    return {'status': result['status'], 'data': response_data}
def _project_root(project_id: str) -> Path:
"""Resolve the filesystem location for a generated project."""
return database_module.settings.projects_root / project_id
@@ -172,6 +226,7 @@ def read_api_info():
'/api',
'/health',
'/generate',
'/generate/text',
'/projects',
'/status/{project_id}',
'/audit/projects',
@@ -202,40 +257,43 @@ def health_check():
@app.post('/generate')
async def generate_software(request: SoftwareRequest, db: DbSession):
    """Create and record a software-generation request.

    Delegates to the shared ``_run_generation`` pipeline, which initializes
    the database schema, runs the orchestrator, and assembles the response.
    """
    # NOTE(review): the pasted diff interleaved the pre-refactor body here;
    # the handler is now a thin wrapper around the shared pipeline.
    return await _run_generation(request, db)
@app.post('/generate/text')
async def generate_software_from_text(request: FreeformSoftwareRequest, db: DbSession):
    """Interpret a free-form request and run generation.

    Telegram-sourced prompts are dropped (status 'ignored') when a
    TELEGRAM_CHAT_ID is configured and the message came from another chat.
    """
    if (
        request.source == 'telegram'
        and database_module.settings.telegram_chat_id
        and request.chat_id
        and str(request.chat_id) != str(database_module.settings.telegram_chat_id)
    ):
        return {
            'status': 'ignored',
            'message': f"Ignoring Telegram message from chat {request.chat_id}",
            'source': {
                'type': request.source,
                'chat_id': request.chat_id,
                'chat_type': request.chat_type,
            },
        }
    # Turn free-form text into the structured shape the orchestrator expects.
    interpreted = await RequestInterpreter().interpret(request.prompt_text)
    structured_request = SoftwareRequest(**interpreted)
    response = await _run_generation(
        structured_request,
        db,
        prompt_text=request.prompt_text,
        prompt_actor=request.source,
    )
    # Echo the interpretation and origin so callers can audit the mapping.
    response['interpreted_request'] = interpreted
    response['source'] = {
        'type': request.source,
        'chat_id': request.chat_id,
        'chat_type': request.chat_type,
    }
    return response
@app.get('/projects')
@@ -348,7 +406,7 @@ async def setup_n8n_workflow(request: N8NSetupRequest, db: DbSession):
)
result = await agent.setup(
webhook_path=request.webhook_path,
backend_url=request.backend_url or f"{database_module.settings.backend_public_url}/generate",
backend_url=request.backend_url or f"{database_module.settings.backend_public_url}/generate/text",
force_update=request.force_update,
telegram_bot_token=database_module.settings.telegram_bot_token,
telegram_credential_name=database_module.settings.n8n_telegram_credential_name,