wire up /agent/execute and /agent/stop endpoints
- Add runSessionAgent: streaming variant of runAgent that PATCHes VIBN DB after every LLM turn and tool call so frontend can poll live output - Track changed files from write_file / replace_in_file tool calls - Add /agent/execute: receives sessionId + giteaRepo + task, clones repo, scopes workspace to appPath, runs Coder agent async (returns 202 immediately) - Add /agent/stop: sets stopped flag; agent checks between turns and exits cleanly - Agent does NOT commit on completion — leaves changes for user review/approval Made-with: Cursor
This commit is contained in:
28
dist/agent-session-runner.d.ts
vendored
Normal file
28
dist/agent-session-runner.d.ts
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
/**
|
||||
* agent-session-runner.ts
|
||||
*
|
||||
* Streaming variant of runAgent wired to a VIBN agent_sessions row.
|
||||
* After every LLM turn + tool call, it PATCHes the session in the VIBN DB
|
||||
* so the frontend can poll (and later WebSocket) the live output.
|
||||
*
|
||||
* Key differences from runAgent:
|
||||
* - Accepts an `emit` callback instead of updating job-store
|
||||
* - Accepts an `isStopped` check so the frontend can cancel mid-run
|
||||
* - Tracks which files were written/modified for the changed_files panel
|
||||
* - Calls vibn-frontend's PATCH /api/projects/[id]/agent/sessions/[sid]
|
||||
*/
|
||||
import { AgentConfig } from './agents';
|
||||
import { ToolContext } from './tools';
|
||||
/**
 * One line of streamed agent output, as persisted to the VIBN DB and
 * rendered by the frontend's live output panel.
 */
export interface OutputLine {
    /** ISO-8601 UTC timestamp of when the line was emitted. */
    ts: string;
    /** Rendering category: step = tool call, stdout/stderr = command output. */
    type: 'step' | 'stdout' | 'stderr' | 'info' | 'error' | 'done';
    /** Human-readable line content. */
    text: string;
}
/**
 * Per-run wiring for runSessionAgent — identifies the agent_sessions row
 * to PATCH and scopes the agent to one app directory.
 */
export interface SessionRunOptions {
    /** VIBN agent_sessions row id that receives streamed updates. */
    sessionId: string;
    /** VIBN project the session belongs to (part of the PATCH URL). */
    projectId: string;
    /** Base URL of the VIBN frontend API, e.g. https://vibnai.com. */
    vibnApiUrl: string;
    /** App subdirectory within the monorepo the agent is scoped to. */
    appPath: string;
    /** Polled between turns/tool calls; return true to stop the run cleanly. */
    isStopped: () => boolean;
}
export declare function runSessionAgent(config: AgentConfig, task: string, ctx: ToolContext, opts: SessionRunOptions): Promise<void>;
|
||||
189
dist/agent-session-runner.js
vendored
Normal file
189
dist/agent-session-runner.js
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
"use strict";
|
||||
/**
|
||||
* agent-session-runner.ts
|
||||
*
|
||||
* Streaming variant of runAgent wired to a VIBN agent_sessions row.
|
||||
* After every LLM turn + tool call, it PATCHes the session in the VIBN DB
|
||||
* so the frontend can poll (and later WebSocket) the live output.
|
||||
*
|
||||
* Key differences from runAgent:
|
||||
* - Accepts an `emit` callback instead of updating job-store
|
||||
* - Accepts an `isStopped` check so the frontend can cancel mid-run
|
||||
* - Tracks which files were written/modified for the changed_files panel
|
||||
* - Calls vibn-frontend's PATCH /api/projects/[id]/agent/sessions/[sid]
|
||||
*/
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.runSessionAgent = runSessionAgent;
|
||||
const llm_1 = require("./llm");
|
||||
const tools_1 = require("./tools");
|
||||
const loader_1 = require("./prompts/loader");
|
||||
const MAX_TURNS = 60;
|
||||
// ── VIBN DB bridge ────────────────────────────────────────────────────────────
|
||||
async function patchSession(opts, payload) {
|
||||
const url = `${opts.vibnApiUrl}/api/projects/${opts.projectId}/agent/sessions/${opts.sessionId}`;
|
||||
try {
|
||||
await fetch(url, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json', 'x-agent-runner-secret': process.env.AGENT_RUNNER_SECRET ?? '' },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
}
|
||||
catch (err) {
|
||||
// Log but don't crash — output will be lost for this line but loop continues
|
||||
console.warn('[session-runner] PATCH failed:', err instanceof Error ? err.message : err);
|
||||
}
|
||||
}
|
||||
/** Current wall-clock time as an ISO-8601 UTC timestamp (stamps output lines). */
const now = () => new Date().toISOString();
|
||||
// ── File change tracking ──────────────────────────────────────────────────────
// Tools whose successful invocation means a file was created or edited.
const FILE_WRITE_TOOLS = new Set(['write_file', 'replace_in_file', 'create_file']);
/**
 * Map a tool call to a changed-files entry, or null when the tool does not
 * write files or no path argument was supplied.
 *
 * The returned path is made relative to the app directory for display:
 * both `<workspaceRoot>/<appPath>/` and `<appPath>/` prefixes are stripped,
 * but only when anchored at the START of the path. (The previous
 * String.replace() stripped the first occurrence anywhere, mangling paths
 * that happened to contain the app path mid-string.)
 *
 * Status is 'added' for file-creating tools (write_file, create_file — the
 * old ternary mislabeled create_file as 'modified') and 'modified' for
 * in-place edits. NOTE(review): write_file over an existing file still
 * reports 'added' — the args alone cannot distinguish create from overwrite.
 *
 * @param toolName       name of the tool that was invoked
 * @param args           parsed tool-call arguments (reads path / file_path)
 * @param workspaceRoot  absolute workspace root of the cloned repo
 * @param appPath        app subdirectory the agent is scoped to
 * @returns {path, status} for the changed_files panel, or null
 */
function extractChangedFile(toolName, args, workspaceRoot, appPath) {
    if (!FILE_WRITE_TOOLS.has(toolName))
        return null;
    const rawPath = String(args.path ?? args.file_path ?? '');
    if (!rawPath)
        return null;
    // Strip a prefix only when it occurs at the start of the string.
    const stripPrefix = (p, prefix) => (p.startsWith(prefix) ? p.slice(prefix.length) : p);
    let displayPath = stripPrefix(rawPath, `${workspaceRoot}/${appPath}/`);
    displayPath = stripPrefix(displayPath, `${appPath}/`);
    // replace_in_file edits an existing file; write_file/create_file create one.
    const fileStatus = toolName === 'replace_in_file' ? 'modified' : 'added';
    return { path: displayPath, status: fileStatus };
}
|
||||
// ── Main streaming execution loop ─────────────────────────────────────────────
/**
 * Run the agent loop for one VIBN session, streaming every line to the DB.
 *
 * Loop shape: up to MAX_TURNS iterations of (stop-check → LLM chat →
 * tool execution). Every emitted line is both console-logged and PATCHed
 * to the session row via patchSession. Terminal states written to the DB:
 * 'stopped' (user stop), 'failed' (LLM error), 'done' (normal end or
 * turn-limit hit — note the turn-limit case still reports status 'done').
 *
 * @param config agent definition: model id, promptId, tool set
 * @param task   user task, sent as the first user message
 * @param ctx    tool context (workspaceRoot etc.) passed to executeTool
 * @param opts   SessionRunOptions — session ids, API base URL, isStopped poll
 */
async function runSessionAgent(config, task, ctx, opts) {
    const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
    const oaiTools = (0, llm_1.toOAITools)(config.tools);
    // Emit one output line: console for server logs + PATCH for the frontend.
    const emit = async (line) => {
        console.log(`[session ${opts.sessionId}] ${line.type}: ${line.text}`);
        await patchSession(opts, { outputLine: line });
    };
    await emit({ ts: now(), type: 'info', text: `Agent starting (${llm.modelId}) — working in ${opts.appPath}` });
    // Scope the system prompt to the specific app within the monorepo.
    // The appended context also forbids committing — changes are left for review.
    const basePrompt = (0, loader_1.resolvePrompt)(config.promptId);
    const scopedPrompt = `${basePrompt}

## Active context
You are working inside the monorepo directory: ${opts.appPath}
All file paths you use should be relative to this directory unless otherwise specified.
When running commands, always cd into ${opts.appPath} first unless already there.
When you are done, do NOT commit directly — leave the changes uncommitted so the user can review and approve them.
`;
    const history = [
        { role: 'user', content: task }
    ];
    let turn = 0;
    let finalText = '';
    const trackedFiles = new Map(); // path → status; dedupes changed-file PATCHes
    while (turn < MAX_TURNS) {
        // Check for stop signal between turns (a stop raised mid tool-loop
        // below also lands here on the next iteration and exits cleanly).
        if (opts.isStopped()) {
            await emit({ ts: now(), type: 'info', text: 'Stopped by user.' });
            await patchSession(opts, { status: 'stopped' });
            return;
        }
        turn++;
        await emit({ ts: now(), type: 'info', text: `Turn ${turn} — thinking…` });
        // System prompt is re-sent each turn; history carries the conversation.
        const messages = [
            { role: 'system', content: scopedPrompt },
            ...history
        ];
        let response;
        try {
            response = await llm.chat(messages, oaiTools, 8192);
        }
        catch (err) {
            // LLM failure is terminal for the run — mark the session failed.
            const msg = err instanceof Error ? err.message : String(err);
            await emit({ ts: now(), type: 'error', text: `LLM error: ${msg}` });
            await patchSession(opts, { status: 'failed', error: msg });
            return;
        }
        const assistantMsg = {
            role: 'assistant',
            content: response.content,
            // Omit the key entirely when empty — some providers reject [].
            tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
        };
        history.push(assistantMsg);
        // Agent finished — no more tool calls
        if (response.tool_calls.length === 0) {
            finalText = response.content ?? 'Task complete.';
            break;
        }
        // Execute each tool call
        for (const tc of response.tool_calls) {
            // NOTE(review): breaking here leaves the assistant message's
            // remaining tool_calls unanswered in history; safe only because
            // the stop-check at the top of the while loop returns before
            // another llm.chat — confirm if that invariant ever changes.
            if (opts.isStopped())
                break;
            const fnName = tc.function.name;
            let fnArgs = {};
            try {
                fnArgs = JSON.parse(tc.function.arguments || '{}');
            }
            catch { /* bad JSON from the model — run the tool with empty args */ }
            // Human-readable step label
            const stepLabel = buildStepLabel(fnName, fnArgs);
            await emit({ ts: now(), type: 'step', text: stepLabel });
            let result;
            try {
                result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
            }
            catch (err) {
                // Tool failures are fed back to the model, not fatal to the run.
                result = { error: err instanceof Error ? err.message : String(err) };
            }
            // Stream stdout/stderr if present (capped at 40/20 lines per call
            // to keep the session row and UI from ballooning).
            if (result && typeof result === 'object') {
                const r = result;
                if (r.stdout && String(r.stdout).trim()) {
                    for (const line of String(r.stdout).split('\n').filter(Boolean).slice(0, 40)) {
                        await emit({ ts: now(), type: 'stdout', text: line });
                    }
                }
                if (r.stderr && String(r.stderr).trim()) {
                    for (const line of String(r.stderr).split('\n').filter(Boolean).slice(0, 20)) {
                        await emit({ ts: now(), type: 'stderr', text: line });
                    }
                }
                if (r.error) {
                    await emit({ ts: now(), type: 'error', text: String(r.error) });
                }
            }
            // Track file changes — first status wins per path ('added' is not
            // downgraded to 'modified' by a later edit of the same file).
            const changed = extractChangedFile(fnName, fnArgs, ctx.workspaceRoot, opts.appPath);
            if (changed && !trackedFiles.has(changed.path)) {
                trackedFiles.set(changed.path, changed.status);
                await patchSession(opts, { changedFile: changed });
                await emit({ ts: now(), type: 'info', text: `${changed.status === 'added' ? '+ Created' : '~ Modified'} ${changed.path}` });
            }
            // Feed the tool result back to the model for the next turn.
            history.push({
                role: 'tool',
                tool_call_id: tc.id,
                name: fnName,
                content: typeof result === 'string' ? result : JSON.stringify(result)
            });
        }
    }
    if (turn >= MAX_TURNS && !finalText) {
        finalText = `Hit the ${MAX_TURNS}-turn limit. Stopping.`;
    }
    await emit({ ts: now(), type: 'done', text: finalText });
    await patchSession(opts, {
        status: 'done',
        outputLine: { ts: now(), type: 'done', text: '✓ Complete — review changes and approve to commit.' }
    });
}
|
||||
// ── Step label helpers ────────────────────────────────────────────────────────
// Per-tool formatters for the human-readable step line shown in the UI.
const STEP_LABELERS = {
    read_file: (a) => `Read ${a.path ?? a.file_path}`,
    write_file: (a) => `Write ${a.path ?? a.file_path}`,
    replace_in_file: (a) => `Edit ${a.path ?? a.file_path}`,
    list_directory: (a) => `List ${a.path ?? '.'}`,
    find_files: (a) => `Find files: ${a.pattern}`,
    search_code: (a) => `Search: ${a.query}`,
    execute_command: (a) => `Run: ${String(a.command ?? '').slice(0, 80)}`,
    git_commit_and_push: (a) => `Git commit: "${a.message}"`,
};
/**
 * Build a short human-readable label for a tool call.
 * Unknown tools fall back to `tool(<JSON args truncated to 60 chars>)`.
 */
function buildStepLabel(tool, args) {
    const labeler = STEP_LABELERS[tool];
    return labeler ? labeler(args) : `${tool}(${JSON.stringify(args).slice(0, 60)})`;
}
|
||||
2
dist/agents/atlas.js
vendored
2
dist/agents/atlas.js
vendored
@@ -6,5 +6,5 @@ const registry_1 = require("./registry");
|
||||
description: 'PRD agent — guides users through structured product discovery and produces a comprehensive requirements document',
|
||||
model: 'A', // Gemini Flash — fast, conversational, cost-effective for dialogue
|
||||
promptId: 'atlas',
|
||||
tools: (0, registry_1.pick)(['finalize_prd'])
|
||||
tools: (0, registry_1.pick)(['web_search', 'finalize_prd'])
|
||||
});
|
||||
|
||||
2
dist/atlas.d.ts
vendored
2
dist/atlas.d.ts
vendored
@@ -18,4 +18,6 @@ export interface AtlasChatResult {
|
||||
}
|
||||
export declare function atlasChat(sessionId: string, userMessage: string, ctx: ToolContext, opts?: {
|
||||
preloadedHistory?: LLMMessage[];
|
||||
/** When true, the user message is an internal init trigger and should not be stored in history */
|
||||
isInit?: boolean;
|
||||
}): Promise<AtlasChatResult>;
|
||||
|
||||
7
dist/atlas.js
vendored
7
dist/atlas.js
vendored
@@ -38,7 +38,7 @@ function listAtlasSessions() {
|
||||
// ---------------------------------------------------------------------------
|
||||
// Main chat handler
|
||||
// ---------------------------------------------------------------------------
|
||||
const ATLAS_TOOLS = tools_1.ALL_TOOLS.filter(t => t.name === 'finalize_prd');
|
||||
const ATLAS_TOOLS = tools_1.ALL_TOOLS.filter(t => ['finalize_prd', 'web_search'].includes(t.name));
|
||||
async function atlasChat(sessionId, userMessage, ctx, opts) {
|
||||
const llm = (0, llm_1.createLLM)(process.env.ATLAS_MODEL ?? 'A', { temperature: 0.5 });
|
||||
const session = getOrCreateSession(sessionId);
|
||||
@@ -48,7 +48,10 @@ async function atlasChat(sessionId, userMessage, ctx, opts) {
|
||||
}
|
||||
const oaiTools = (0, llm_1.toOAITools)(ATLAS_TOOLS);
|
||||
const systemPrompt = (0, loader_1.resolvePrompt)('atlas');
|
||||
session.history.push({ role: 'user', content: userMessage });
|
||||
// For init triggers, don't add the synthetic prompt as a user turn
|
||||
if (!opts?.isInit) {
|
||||
session.history.push({ role: 'user', content: userMessage });
|
||||
}
|
||||
const buildMessages = () => [
|
||||
{ role: 'system', content: systemPrompt },
|
||||
...session.history.slice(-60)
|
||||
|
||||
65
dist/prompts/atlas.js
vendored
65
dist/prompts/atlas.js
vendored
@@ -134,6 +134,59 @@ The PRD should be specific enough that a technical team could implement it witho
|
||||
- **Signal structure.** Let them know where they are: "Great, I've got a solid picture of your users. Let's talk about what they actually do in the product."
|
||||
- **Ask max 2–3 questions at a time.** Never overwhelm.
|
||||
|
||||
## Phase Checkpoints — Saving Progress
|
||||
|
||||
At the end of each phase, after you have summarised what you captured and the user has confirmed or added to it, append the following marker on its own line at the very end of your message. Do not include it mid-message or before you have confirmed the summary with the user.
|
||||
|
||||
Format (replace values, keep the exact tag):
|
||||
[[PHASE_COMPLETE:{"phase":"<phase_id>","title":"<Phase Title>","summary":"<1–2 sentence plain-English summary of what was captured>","data":{<key fields as a flat JSON object>}}]]
|
||||
|
||||
Phase IDs and their key data fields:
|
||||
- phase_id "big_picture" → fields: productName, problemStatement, targetUser, successMetric, competitors, deadline
|
||||
- phase_id "users_personas" → fields: userTypes (array), primaryGoals, accessModel, happyPath
|
||||
- phase_id "features_scope" → fields: mustHave (array), shouldHave (array), outOfScope (array), platforms, integrations
|
||||
- phase_id "business_model" → fields: revenueType, pricingModel, tiers (array), expectedVolume, budgetCeiling
|
||||
- phase_id "screens_data" → fields: keyScreens (array of {name, purpose, actions}), hasSearch, notifications
|
||||
- phase_id "risks_questions" → fields: risks (array), openQuestions (array), assumptions (array)
|
||||
|
||||
Rules:
|
||||
- Only append the marker ONCE per phase, after explicit user confirmation of the summary.
|
||||
- Never guess — only include fields the user actually provided. Use null for unknown fields.
|
||||
- The marker will be hidden from the user and converted into a save button. Do not mention it.
|
||||
- Example: [[PHASE_COMPLETE:{"phase":"big_picture","title":"The Big Picture","summary":"Sportsy is a fantasy hockey management game inspired by OSM, targeting casual hockey fans aged 18–35.","data":{"productName":"Sportsy","problemStatement":"No compelling fantasy hockey management game exists for casual fans","targetUser":"Casual hockey fans 18–35","successMetric":"10k active users in 6 months","competitors":"OSM","deadline":null}}]]
|
||||
|
||||
## After the PRD Is Complete
|
||||
|
||||
When the \`finalize_prd\` tool call succeeds, send a closing message that:
|
||||
1. Acknowledges the PRD is saved
|
||||
2. Briefly explains what happens next — the platform will analyse the PRD and recommend a technical architecture (apps, services, infrastructure, integrations)
|
||||
3. Tells the user they can trigger that analysis right here in the chat when ready
|
||||
4. Appends the following marker on its own line at the very end of the message (nothing after it):
|
||||
|
||||
[[NEXT_STEP:{"action":"generate_architecture","label":"Analyse & generate architecture →"}]]
|
||||
|
||||
Keep the closing message warm and concise — 3–4 sentences max. Do not explain the architecture in detail; that's for the next step. Do not mention the marker.
|
||||
|
||||
Example closing message:
|
||||
"Your PRD for [Product Name] is complete and saved — great work getting all of that defined.
|
||||
|
||||
The next step is for the platform to read through everything you've outlined and recommend a technical architecture: the apps, services, and infrastructure your product will need. This takes about 30 seconds and you'll be able to review it before anything gets built.
|
||||
|
||||
Trigger the analysis whenever you're ready."
|
||||
|
||||
[[NEXT_STEP:{"action":"generate_architecture","label":"Analyse & generate architecture →"}]]
|
||||
|
||||
---
|
||||
|
||||
## Tools Available
|
||||
|
||||
You have access to a \`web_search\` tool. Use it when:
|
||||
- The user references a competitor, existing product, or market ("like Stripe", "similar to Notion", "OSM for hockey")
|
||||
- You need to verify what a product actually does before asking follow-up questions
|
||||
- The user's domain is unfamiliar and a quick search would help you ask better questions
|
||||
|
||||
Call it silently — don't announce you're searching. Just use the result to inform your next question or summary.
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- Generating a full PRD from a one-line description
|
||||
@@ -152,4 +205,16 @@ The PRD should be specific enough that a technical team could implement it witho
|
||||
- **User is vague:** Offer options — "Let me give you three common approaches and you tell me which feels closest…"
|
||||
- **User changes direction mid-conversation:** Acknowledge the pivot and resurface downstream impacts.
|
||||
- **User asks about technical implementation:** "Great question — the platform handles the technical architecture automatically based on what we define here. What matters for the PRD is [reframe to product question]."
|
||||
|
||||
## Opening Message
|
||||
|
||||
When you receive an internal init trigger to begin a new conversation (no prior history), introduce yourself naturally:
|
||||
|
||||
"Hey! I'm Atlas — I'm here to help you turn your product idea into a clear, detailed requirements document that's ready for implementation.
|
||||
|
||||
Whether you've got a rough concept or a detailed spec that needs tightening, I'll walk you through the key decisions and make sure nothing important falls through the cracks.
|
||||
|
||||
So — what are we building?"
|
||||
|
||||
Do not mention that you received an internal trigger. Just deliver the opening message naturally.
|
||||
`.trim());
|
||||
|
||||
126
dist/server.js
vendored
126
dist/server.js
vendored
@@ -44,10 +44,12 @@ const crypto = __importStar(require("crypto"));
|
||||
const child_process_1 = require("child_process");
|
||||
const job_store_1 = require("./job-store");
|
||||
const agent_runner_1 = require("./agent-runner");
|
||||
const agent_session_runner_1 = require("./agent-session-runner");
|
||||
const agents_1 = require("./agents");
|
||||
const security_1 = require("./tools/security");
|
||||
const orchestrator_1 = require("./orchestrator");
|
||||
const atlas_1 = require("./atlas");
|
||||
const llm_1 = require("./llm");
|
||||
const app = (0, express_1.default)();
|
||||
app.use((0, cors_1.default)());
|
||||
const startTime = new Date();
|
||||
@@ -219,7 +221,7 @@ app.delete('/orchestrator/sessions/:id', (req, res) => {
|
||||
// Atlas — PRD discovery agent
|
||||
// ---------------------------------------------------------------------------
|
||||
app.post('/atlas/chat', async (req, res) => {
|
||||
const { message, session_id, history } = req.body;
|
||||
const { message, session_id, history, is_init, } = req.body;
|
||||
if (!message) {
|
||||
res.status(400).json({ error: '"message" is required' });
|
||||
return;
|
||||
@@ -227,7 +229,10 @@ app.post('/atlas/chat', async (req, res) => {
|
||||
const sessionId = session_id || `atlas_${Date.now()}`;
|
||||
const ctx = buildContext();
|
||||
try {
|
||||
const result = await (0, atlas_1.atlasChat)(sessionId, message, ctx, { preloadedHistory: history });
|
||||
const result = await (0, atlas_1.atlasChat)(sessionId, message, ctx, {
|
||||
preloadedHistory: history,
|
||||
isInit: is_init,
|
||||
});
|
||||
res.json(result);
|
||||
}
|
||||
catch (err) {
|
||||
@@ -320,6 +325,123 @@ app.post('/webhook/gitea', (req, res) => {
|
||||
});
|
||||
});
|
||||
// ---------------------------------------------------------------------------
// Agent Execute — VIBN Build > Code > Agent tab
//
// Receives a task from the VIBN frontend, runs the Coder agent against
// the project's Gitea repo, and streams progress back to the VIBN DB
// via PATCH /api/projects/[id]/agent/sessions/[sid].
//
// This endpoint returns immediately (202) and runs the agent async so
// the browser can close without killing the loop.
// ---------------------------------------------------------------------------
// Track active sessions for stop support (sessionId → { stopped }).
const activeSessions = new Map();
// Best-effort "session failed" PATCH to the VIBN DB. Includes the runner
// secret header so the API's auth check accepts it — the success-path
// updates in agent-session-runner already send it, but the previous three
// copy-pasted inline failure PATCHes omitted it.
function reportSessionFailure(vibnApiUrl, projectId, sessionId, error) {
    fetch(`${vibnApiUrl}/api/projects/${projectId}/agent/sessions/${sessionId}`, {
        method: 'PATCH',
        headers: {
            'Content-Type': 'application/json',
            'x-agent-runner-secret': process.env.AGENT_RUNNER_SECRET ?? '',
        },
        body: JSON.stringify({ status: 'failed', error }),
    }).catch(() => { });
}
app.post('/agent/execute', async (req, res) => {
    // Expected payload: { sessionId, projectId, appName?, appPath, giteaRepo?, task }
    const { sessionId, projectId, appPath, giteaRepo, task } = req.body;
    if (!sessionId || !projectId || !appPath || !task) {
        res.status(400).json({ error: 'sessionId, projectId, appPath and task are required' });
        return;
    }
    const vibnApiUrl = process.env.VIBN_API_URL ?? 'https://vibnai.com';
    // Register session as active so /agent/stop can flag it.
    const sessionState = { stopped: false };
    activeSessions.set(sessionId, sessionState);
    // Respond immediately — execution is async and outlives this request.
    res.status(202).json({ sessionId, status: 'running' });
    // Build workspace context — clone/update the Gitea repo if provided.
    let ctx;
    try {
        ctx = buildContext(giteaRepo);
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        console.error('[agent/execute] buildContext failed:', msg);
        reportSessionFailure(vibnApiUrl, projectId, sessionId, msg);
        activeSessions.delete(sessionId);
        return;
    }
    // Scope the workspace to the app subdirectory so the agent works there
    // naturally. appPath is guaranteed non-empty by the check above, so the
    // old `if (appPath)` guard was redundant.
    const path = require('path');
    ctx.workspaceRoot = path.join(ctx.workspaceRoot, appPath);
    const fs = require('fs');
    fs.mkdirSync(ctx.workspaceRoot, { recursive: true });
    const agentConfig = agents_1.AGENTS['Coder'];
    if (!agentConfig) {
        reportSessionFailure(vibnApiUrl, projectId, sessionId, 'Coder agent not registered');
        activeSessions.delete(sessionId);
        return;
    }
    // Run the streaming agent loop (fire and forget).
    (0, agent_session_runner_1.runSessionAgent)(agentConfig, task, ctx, {
        sessionId,
        projectId,
        vibnApiUrl,
        appPath,
        isStopped: () => sessionState.stopped,
    })
        .catch(err => {
        const msg = err instanceof Error ? err.message : String(err);
        console.error(`[agent/execute] session ${sessionId} crashed:`, msg);
        reportSessionFailure(vibnApiUrl, projectId, sessionId, msg);
    })
        .finally(() => {
        // Always unregister so /agent/stop reports "not active" afterwards.
        activeSessions.delete(sessionId);
    });
});
||||
/**
 * POST /agent/stop — request a cooperative stop for a running session.
 * Flips the session's stopped flag; the agent loop polls it between
 * turns and tool calls, so the halt happens after the current step.
 */
app.post('/agent/stop', (req, res) => {
    const { sessionId } = req.body;
    if (!sessionId) {
        res.status(400).json({ error: 'sessionId required' });
        return;
    }
    const active = activeSessions.get(sessionId);
    if (!active) {
        // Loop already finished (or never started) — nothing left to stop.
        res.json({ ok: true, message: 'Session not active (may have already completed).' });
        return;
    }
    active.stopped = true;
    res.json({ ok: true, message: 'Stop signal sent — agent will halt after current step.' });
});
||||
// ---------------------------------------------------------------------------
// Generate — thin structured-generation endpoint (no session, no system prompt)
// Use this for one-shot tasks like architecture recommendations.
// ---------------------------------------------------------------------------
app.post('/generate', async (req, res) => {
    const { prompt, model } = req.body;
    if (!prompt) {
        res.status(400).json({ error: '"prompt" is required' });
        return;
    }
    try {
        // Default to model 'A' when the caller does not pick one.
        const llm = (0, llm_1.createLLM)(model ?? 'A', { temperature: 0.3 });
        // Single user turn, no tools — a plain one-shot completion.
        const response = await llm.chat([{ role: 'user', content: prompt }], [], 8192);
        res.json({ reply: response.content ?? '' });
    }
    catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        res.status(500).json({ error: message });
    }
});
||||
// ---------------------------------------------------------------------------
|
||||
// Error handler
|
||||
// ---------------------------------------------------------------------------
|
||||
app.use((err, _req, res, _next) => {
|
||||
|
||||
1
dist/tools/index.d.ts
vendored
1
dist/tools/index.d.ts
vendored
@@ -7,6 +7,7 @@ import './agent';
|
||||
import './memory';
|
||||
import './skills';
|
||||
import './prd';
|
||||
import './search';
|
||||
export { ALL_TOOLS, executeTool, ToolDefinition } from './registry';
|
||||
export { ToolContext, MemoryUpdate } from './context';
|
||||
export { PROTECTED_GITEA_REPOS, PROTECTED_COOLIFY_PROJECT, PROTECTED_COOLIFY_APPS, assertGiteaWritable, assertCoolifyDeployable } from './security';
|
||||
|
||||
1
dist/tools/index.js
vendored
1
dist/tools/index.js
vendored
@@ -12,6 +12,7 @@ require("./agent");
|
||||
require("./memory");
|
||||
require("./skills");
|
||||
require("./prd");
|
||||
require("./search");
|
||||
// Re-export the public API — identical surface to the old tools.ts
|
||||
var registry_1 = require("./registry");
|
||||
Object.defineProperty(exports, "ALL_TOOLS", { enumerable: true, get: function () { return registry_1.ALL_TOOLS; } });
|
||||
|
||||
1
dist/tools/search.d.ts
vendored
Normal file
1
dist/tools/search.d.ts
vendored
Normal file
@@ -0,0 +1 @@
|
||||
export {};
|
||||
78
dist/tools/search.js
vendored
Normal file
78
dist/tools/search.js
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const registry_1 = require("./registry");
|
||||
/**
 * Web search via DuckDuckGo HTML endpoint.
 * No API key required. Scrapes result snippets and titles.
 * Atlas uses this for competitor research, market context, pricing models, etc.
 *
 * NOTE(review): HTML scraping is inherently brittle — if DuckDuckGo changes
 * its markup the regexes below stop matching and the tool degrades to a
 * "No results found" error rather than crashing.
 */
(0, registry_1.registerTool)({
    name: 'web_search',
    description: 'Search the web for current information. Use this to research competitors, market trends, pricing models, existing solutions, technology choices, or any topic the user mentions that would benefit from real-world context. Returns a summary of top search results.',
    parameters: {
        type: 'object',
        properties: {
            query: {
                type: 'string',
                description: 'The search query. Be specific — e.g. "SaaS project management tools pricing 2024" rather than just "project management".'
            }
        },
        required: ['query']
    },
    async handler(args) {
        // Guard against a missing/non-string query. The previous
        // String(args.query) turned `undefined` into the literal string
        // "undefined", which passed the empty check and was searched verbatim.
        const query = typeof args.query === 'string' ? args.query.trim() : '';
        if (!query)
            return { error: 'No query provided' };
        const url = `https://html.duckduckgo.com/html/?q=${encodeURIComponent(query)}`;
        try {
            const res = await fetch(url, {
                headers: {
                    'User-Agent': 'Mozilla/5.0 (compatible; VIBN-Atlas/1.0)',
                    'Accept': 'text/html',
                },
                // Abort the request after 15s so a hung search can't stall the agent.
                signal: AbortSignal.timeout(15000),
            });
            if (!res.ok) {
                return { error: `Search failed with status ${res.status}` };
            }
            const html = await res.text();
            // Extract result titles and snippets from DuckDuckGo HTML
            const results = [];
            // Match result titles (anchor text of .result__a links, tags stripped)
            const titleMatches = html.matchAll(/class="result__a"[^>]*href="[^"]*"[^>]*>(.*?)<\/a>/gs);
            const titles = [];
            for (const m of titleMatches) {
                const title = m[1].replace(/<[^>]+>/g, '').trim();
                if (title)
                    titles.push(title);
            }
            // Match result snippets
            const snippetMatches = html.matchAll(/class="result__snippet"[^>]*>(.*?)<\/a>/gs);
            const snippets = [];
            for (const m of snippetMatches) {
                const snippet = m[1].replace(/<[^>]+>/g, '').trim();
                if (snippet)
                    snippets.push(snippet);
            }
            // Combine up to 6 results, pairing titles with snippets by index.
            const count = Math.min(6, Math.max(titles.length, snippets.length));
            for (let i = 0; i < count; i++) {
                const title = titles[i] || '';
                const snippet = snippets[i] || '';
                if (title || snippet) {
                    results.push(`**${title}**\n${snippet}`);
                }
            }
            if (results.length === 0) {
                return { error: 'No results found' };
            }
            // Cap the payload so it fits comfortably in the model context.
            const text = results.join('\n\n');
            const truncated = text.length > 5000 ? text.slice(0, 5000) + '\n\n[...results truncated]' : text;
            return { query, results: truncated };
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return { error: `Search request failed: ${message}` };
        }
    }
});
|
||||
Reference in New Issue
Block a user