vibn-agent-runner/dist/orchestrator.js
mawkone e29dccf745 refactor: implement three-layer agent architecture (agents / prompts / skills)
Layer 1 — src/agents/ (thin agent definitions, no prompt text)
  registry.ts   — AgentConfig, registerAgent(), getAgent(), AGENTS proxy, pick()
  orchestrator.ts, coder.ts, pm.ts, marketing.ts — one file each, just metadata + tool picks
  index.ts      — barrel: imports prompts then agents (correct registration order)
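
  A rough sketch of the registry shape (promptId comes from the config.promptId
  call noted under Layer 2; the tools field and all function bodies here are
  assumptions):

    // src/agents/registry.ts: illustrative sketch, not the actual implementation
    export interface AgentConfig {
        id: string;
        promptId: string;   // key into the Layer 2 prompt loader
        tools: string[];    // tool names this agent may call (assumed field)
    }

    const registry = new Map<string, AgentConfig>();

    export function registerAgent(config: AgentConfig): void {
        registry.set(config.id, config);
    }

    export function getAgent(id: string): AgentConfig {
        const agent = registry.get(id);
        if (!agent) throw new Error(`Unknown agent: ${id}`);
        return agent;
    }

    // AGENTS proxy: property access doubles as a lookup, e.g. AGENTS.coder
    export const AGENTS = new Proxy({} as Record<string, AgentConfig>, {
        get: (_target, prop) => getAgent(String(prop))
    });

    // pick() is omitted here; the commit doesn't show its signature.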

Layer 2 — src/prompts/ (prompt text separated from agent logic)
  loader.ts     — registerPrompt(), resolvePrompt() with {{variable}} substitution
  orchestrator.ts, coder.ts, pm.ts, marketing.ts — prompt templates as registered strings
  src/orchestrator.ts now uses resolvePrompt('orchestrator', { knowledge }) instead
  of an inline SYSTEM_PROMPT const; the {{knowledge}} variable injects project
  memory cleanly.
  agent-runner.ts uses resolvePrompt(config.promptId) per agent turn.
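
  One plausible shape for the loader ({{variable}} substitution is from this
  commit; the treatment of unknown ids and missing variables is an assumption):

    // src/prompts/loader.ts: illustrative sketch
    const prompts = new Map<string, string>();

    export function registerPrompt(id: string, template: string): void {
        prompts.set(id, template);
    }

    // Replace each {{name}} placeholder with vars[name]; missing vars become ''
    export function resolvePrompt(id: string, vars: Record<string, string> = {}): string {
        const template = prompts.get(id);
        if (!template) throw new Error(`Unknown prompt: ${id}`);
        return template.replace(/\{\{(\w+)\}\}/g, (_m, name) => vars[name] ?? '');
    }

  The call site in the dist output below matches this shape:
  resolvePrompt('orchestrator', { knowledge: ... }).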

Layer 3 — src/tools/skills.ts (new skills capability)
  list_skills(repo)      — lists the .skills/<name>/ directories (each holding a SKILL.md) in a Gitea repo
  get_skill(repo, name)  — reads and returns the markdown body of a skill file
  Orchestrator and all agents now have get_skill in their tool sets.
  Orchestrator also has list_skills and references skills in its prompt.
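
  A sketch of how these tools could sit on Gitea's contents API (env var names,
  auth scheme, and the repo = "owner/name" format are assumptions):

    // src/tools/skills.ts: illustrative sketch
    const GITEA_URL = process.env.GITEA_URL;
    const GITEA_TOKEN = process.env.GITEA_TOKEN;

    async function giteaGet(path: string): Promise<any> {
        const res = await fetch(`${GITEA_URL}/api/v1/${path}`, {
            headers: { Authorization: `token ${GITEA_TOKEN}` }
        });
        if (!res.ok) throw new Error(`Gitea ${res.status} for ${path}`);
        return res.json();
    }

    // list_skills: each directory under .skills/ counts as one skill
    export async function listSkills(repo: string): Promise<string[]> {
        const entries = await giteaGet(`repos/${repo}/contents/.skills`);
        return entries.filter((e: any) => e.type === 'dir').map((e: any) => e.name);
    }

    // get_skill: fetch .skills/<name>/SKILL.md and decode its base64 body
    export async function getSkill(repo: string, name: string): Promise<string> {
        const file = await giteaGet(`repos/${repo}/contents/.skills/${name}/SKILL.md`);
        return Buffer.from(file.content, 'base64').toString('utf8');
    }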

Also fixed:
  - server.ts now passes history + knowledge_context from request body to orchestratorChat()
    (these were being sent by the frontend but silently dropped)
  - server.ts imports PROTECTED_GITEA_REPOS from tools/security.ts (no more duplicated constant)
  - Deleted src/agents.ts (replaced by src/agents/ directory)
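
  The server fix wires the request body into the opts parameter that
  orchestratorChat (below) already reads; a sketch, with the framework, route,
  and ctx construction assumed:

    // server.ts: illustrative sketch of the history/knowledge_context fix
    import express from 'express';
    import { orchestratorChat } from './orchestrator';

    const app = express();
    app.use(express.json());

    app.post('/api/chat', async (req, res) => {
        const { sessionId, message, history, knowledge_context } = req.body;
        const ctx = { memoryUpdates: [] } as any; // minimal ToolContext stand-in
        const result = await orchestratorChat(sessionId, message, ctx, {
            preloadedHistory: history,            // was sent by the frontend but dropped
            knowledgeContext: knowledge_context   // was sent by the frontend but dropped
        });
        res.json(result);
    });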

Made-with: Cursor
2026-03-01 15:38:42 -08:00

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.listSessions = listSessions;
exports.clearSession = clearSession;
exports.orchestratorChat = orchestratorChat;
const llm_1 = require("./llm");
const tools_1 = require("./tools");
const loader_1 = require("./prompts/loader");
const MAX_TURNS = 20;
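// In-memory session store keyed by sessionId; state is lost on process restart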
const sessions = new Map();
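// Fetch an existing session or create a fresh one; every call refreshes lastActiveAt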
function getOrCreateSession(sessionId) {
    if (!sessions.has(sessionId)) {
        sessions.set(sessionId, {
            id: sessionId,
            history: [],
            createdAt: new Date().toISOString(),
            lastActiveAt: new Date().toISOString()
        });
    }
    const session = sessions.get(sessionId);
    session.lastActiveAt = new Date().toISOString();
    return session;
}
function listSessions() {
    return Array.from(sessions.values()).map(s => ({
        id: s.id,
        messages: s.history.length,
        createdAt: s.createdAt,
        lastActiveAt: s.lastActiveAt
    }));
}
function clearSession(sessionId) {
    sessions.delete(sessionId);
}
// ---------------------------------------------------------------------------
// Main orchestrator chat — uses GLM-5 (Tier B) by default
// ---------------------------------------------------------------------------
async function orchestratorChat(sessionId, userMessage, ctx, opts) {
    const modelId = process.env.ORCHESTRATOR_MODEL ?? 'B'; // Tier B = GLM-5
    const llm = (0, llm_1.createLLM)(modelId, { temperature: 0.3 });
    const session = getOrCreateSession(sessionId);
    // Seed session from DB history if provided and session is fresh
    if (opts?.preloadedHistory && opts.preloadedHistory.length > 0 && session.history.length === 0) {
        session.history = [...opts.preloadedHistory];
    }
    const oaiTools = (0, llm_1.toOAITools)(tools_1.ALL_TOOLS);
    // Append user message
    session.history.push({ role: 'user', content: userMessage });
    let turn = 0;
    let finalReply = '';
    let finalReasoning = null;
    const toolCallNames = [];
    // Resolve system prompt from template — {{knowledge}} injects project memory
    const systemContent = (0, loader_1.resolvePrompt)('orchestrator', {
        knowledge: opts?.knowledgeContext
            ? `## Project Memory (known facts)\n${opts.knowledgeContext}`
            : ''
    });
    // Build messages with system prompt prepended; keep last 40 for cost control
    const buildMessages = () => [
        { role: 'system', content: systemContent },
        ...session.history.slice(-40)
    ];
    while (turn < MAX_TURNS) {
        turn++;
        const response = await llm.chat(buildMessages(), oaiTools, 4096);
        // If GLM-5 is still reasoning (content null, finish_reason length) give it more tokens
        if (response.content === null && response.tool_calls.length === 0 && response.finish_reason === 'length') {
            // Retry with more tokens — model hit max_tokens during reasoning
            const retry = await llm.chat(buildMessages(), oaiTools, 8192);
            Object.assign(response, retry);
        }
        // Record reasoning for the final turn (informational, not stored in history)
        if (response.reasoning)
            finalReasoning = response.reasoning;
        // Only push assistant message if it has actual content or tool calls;
        // skip empty turns that result from mid-reasoning token exhaustion.
        const hasContent = response.content !== null && response.content !== '';
        const hasToolCalls = response.tool_calls.length > 0;
        if (hasContent || hasToolCalls) {
            const assistantMsg = {
                role: 'assistant',
                content: response.content,
                tool_calls: hasToolCalls ? response.tool_calls : undefined
            };
            session.history.push(assistantMsg);
        }
        // No tool calls — we have the final answer
        if (!hasToolCalls) {
            finalReply = response.content ?? '';
            break;
        }
        // Execute each tool call and collect results
        for (const tc of response.tool_calls) {
            const fnName = tc.function.name;
            let fnArgs = {};
            try {
                fnArgs = JSON.parse(tc.function.arguments || '{}');
            }
            catch { /* bad JSON */ }
            toolCallNames.push(fnName);
            let result;
            try {
                result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
            }
            catch (err) {
                result = { error: err instanceof Error ? err.message : String(err) };
            }
            // Add tool result to history
            session.history.push({
                role: 'tool',
                tool_call_id: tc.id,
                name: fnName,
                content: typeof result === 'string' ? result : JSON.stringify(result)
            });
        }
    }
    if (turn >= MAX_TURNS && !finalReply) {
        finalReply = 'Hit the turn limit. Try a more specific request.';
    }
    return {
        reply: finalReply,
        reasoning: finalReasoning,
        sessionId,
        turns: turn,
        toolCalls: toolCallNames,
        model: llm.modelId,
        history: session.history
            .filter(m => m.role !== 'assistant' || m.content || m.tool_calls?.length)
            .slice(-40),
        memoryUpdates: ctx.memoryUpdates
    };
}