vibn-agent-runner/dist/agent-runner.js
mawkone e29dccf745 refactor: implement three-layer agent architecture (agents / prompts / skills)
Layer 1 — src/agents/ (thin agent definitions, no prompt text)
  registry.ts   — AgentConfig, registerAgent(), getAgent(), AGENTS proxy, pick() (sketched after this list)
  orchestrator.ts, coder.ts, pm.ts, marketing.ts — one file each, just metadata + tool picks
  index.ts      — barrel: imports prompts then agents (correct registration order)
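
  A minimal sketch of what registry.ts could look like, inferred from the names above;
  the backing Map, the error text, and pick()'s exact semantics are assumptions, not
  the actual implementation:

    // Hypothetical sketch of src/agents/registry.ts. Only the exported names
    // (AgentConfig, registerAgent, getAgent, AGENTS, pick) come from this
    // commit; field names and pick()'s semantics are guesses.
    export interface AgentConfig {
        name: string;       // e.g. 'orchestrator', 'coder', 'pm', 'marketing'
        model: string;      // tier ('A' | 'B' | 'C') or a concrete model ID
        promptId: string;   // key into the Layer 2 prompt registry
        tools: string[];    // names of the tools this agent may call
    }

    const registry = new Map<string, AgentConfig>();

    export function registerAgent(config: AgentConfig): void {
        registry.set(config.name, config);
    }

    export function getAgent(name: string): AgentConfig {
        const config = registry.get(name);
        if (!config) throw new Error(`Unknown agent: ${name}`);
        return config;
    }

    // Proxy so call sites can write AGENTS.coder instead of getAgent('coder').
    export const AGENTS = new Proxy({} as Record<string, AgentConfig>, {
        get: (_target, name) => getAgent(String(name)),
    });

    // pick(): fetch several agent configs at once (guessed semantics).
    export function pick(...names: string[]): AgentConfig[] {
        return names.map(getAgent);
    }

  The Proxy keeps call sites terse while still routing every lookup through
  getAgent()'s unknown-agent check.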

Layer 2 — src/prompts/ (prompt text separated from agent logic)
  loader.ts     — registerPrompt(), resolvePrompt() with {{variable}} substitution (sketched below)
  orchestrator.ts, coder.ts, pm.ts, marketing.ts — prompt templates as registered strings
  orchestrator.ts now uses resolvePrompt('orchestrator', { knowledge }) instead of an
  inline SYSTEM_PROMPT const; the {{knowledge}} variable injects project memory cleanly.
  agent-runner.ts calls resolvePrompt(config.promptId) on every agent turn.
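
  A minimal sketch of the loader, assuming a Map-backed registry and a simple regex for
  the {{variable}} syntax; only registerPrompt, resolvePrompt, and the substitution
  syntax come from this commit:

    // Hypothetical sketch of src/prompts/loader.ts. registerPrompt, resolvePrompt,
    // and {{variable}} substitution are from the commit message; the rest is assumed.
    const prompts = new Map<string, string>();

    export function registerPrompt(id: string, template: string): void {
        prompts.set(id, template);
    }

    // Resolve a template, substituting {{name}} placeholders from vars.
    // Unknown placeholders are left intact rather than erased (a design guess).
    export function resolvePrompt(id: string, vars: Record<string, string> = {}): string {
        const template = prompts.get(id);
        if (!template) throw new Error(`Unknown prompt: ${id}`);
        return template.replace(/\{\{(\w+)\}\}/g, (match, name) =>
            name in vars ? vars[name] : match);
    }

  Under this sketch, resolvePrompt('orchestrator', { knowledge }) replaces {{knowledge}}
  in the registered template with the project-memory string.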

Layer 3 — src/tools/skills.ts (new skills capability)
  list_skills(repo)      — lists the directories under .skills/ that contain a SKILL.md in a Gitea repo
  get_skill(repo, name)  — reads and returns the markdown body of one skill file (both sketched below)
  Orchestrator and all agents now have get_skill in their tool sets.
  Orchestrator also has list_skills and references skills in its prompt.
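
  A sketch of the two skills tools, assuming a thin helper around Gitea's contents API
  (GET /api/v1/repos/{owner}/{repo}/contents/{path}); the helper, the env vars, and the
  error handling are illustrative, not from the commit:

    // Hypothetical sketch of src/tools/skills.ts. The .skills/<name>/SKILL.md
    // layout is from the commit message; giteaContents(), the env vars, and the
    // camelCase function names here are assumptions.
    async function giteaContents(repo: string, path: string): Promise<any> {
        const res = await fetch(
            `${process.env.GITEA_URL}/api/v1/repos/${repo}/contents/${path}`,
            { headers: { Authorization: `token ${process.env.GITEA_TOKEN}` } }
        );
        if (!res.ok) throw new Error(`Gitea ${res.status} for ${repo}:${path}`);
        return res.json();
    }

    // list_skills: every directory under .skills/ is treated as one skill.
    export async function listSkills(repo: string): Promise<string[]> {
        const entries = await giteaContents(repo, '.skills');
        return entries
            .filter((e: { type: string }) => e.type === 'dir')
            .map((e: { name: string }) => e.name);
    }

    // get_skill: fetch and decode the markdown body of one skill file.
    // Gitea's contents API returns file bodies base64-encoded.
    export async function getSkill(repo: string, name: string): Promise<string> {
        const file = await giteaContents(repo, `.skills/${name}/SKILL.md`);
        return Buffer.from(file.content, 'base64').toString('utf8');
    }

  fetch is global in Node 18+; on older runtimes a client such as node-fetch would be
  needed instead.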

Also fixed:
  - server.ts now passes history + knowledge_context from the request body to orchestratorChat()
    (the frontend was already sending both, but they were silently dropped; sketched after this list)
  - server.ts imports PROTECTED_GITEA_REPOS from tools/security.ts (removing the duplicated constant)
  - Deleted src/agents.ts (replaced by src/agents/ directory)
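
  A hedged sketch of the server.ts fix; the route path, handler shape, and
  orchestratorChat() signature are guesses for illustration only:

    import express from 'express';
    // orchestratorChat's module and signature are assumptions.
    import { orchestratorChat } from './agents';

    const app = express();
    app.use(express.json());

    app.post('/api/chat', async (req, res) => {
        // history and knowledge_context were already in the request body,
        // but were previously dropped before reaching orchestratorChat().
        const { message, history, knowledge_context } = req.body;
        const reply = await orchestratorChat(message, {
            history,
            knowledgeContext: knowledge_context,
        });
        res.json(reply);
    });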

Made-with: Cursor
2026-03-01 15:38:42 -08:00

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.runAgent = runAgent;
const llm_1 = require("./llm");
const tools_1 = require("./tools");
const loader_1 = require("./prompts/loader");
const job_store_1 = require("./job-store");
const MAX_TURNS = 40;
/**
* Core agent execution loop — model-agnostic via the unified LLM client.
*
* Agents use their configured model tier (A/B/C) or a specific model ID.
* Tool calling uses OpenAI format throughout.
*/
async function runAgent(job, config, task, ctx) {
const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
const oaiTools = (0, llm_1.toOAITools)(config.tools);
const history = [
{ role: 'user', content: task }
];
let toolCallCount = 0;
let turn = 0;
let finalText = '';
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} (${llm.modelId})…` });
while (turn < MAX_TURNS) {
turn++;
const systemPrompt = (0, loader_1.resolvePrompt)(config.promptId);
const messages = [
{ role: 'system', content: systemPrompt },
...history
];
const response = await llm.chat(messages, oaiTools, 8192);
// Build assistant message for history
const assistantMsg = {
role: 'assistant',
content: response.content,
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
};
history.push(assistantMsg);
// No tool calls — agent is done
if (response.tool_calls.length === 0) {
finalText = response.content ?? '';
break;
}
// Execute tool calls
for (const tc of response.tool_calls) {
const fnName = tc.function.name;
let fnArgs = {};
try {
fnArgs = JSON.parse(tc.function.arguments || '{}');
}
catch { /* bad JSON */ }
toolCallCount++;
(0, job_store_1.updateJob)(job.id, {
progress: `Turn ${turn}: calling ${fnName}`,
toolCalls: [...(job.toolCalls || []), {
turn,
tool: fnName,
args: fnArgs,
timestamp: new Date().toISOString()
}]
});
let result;
try {
result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
}
catch (err) {
result = { error: err instanceof Error ? err.message : String(err) };
}
history.push({
role: 'tool',
tool_call_id: tc.id,
name: fnName,
content: typeof result === 'string' ? result : JSON.stringify(result)
});
}
}
if (turn >= MAX_TURNS && !finalText) {
finalText = `Agent hit the ${MAX_TURNS}-turn safety limit. Tool calls made: ${toolCallCount}.`;
}
return { finalText, toolCallCount, turns: turn, model: llm.modelId };
}