Files
vibn-agent-runner/dist/orchestrator.js
mawkone d9368e4abd fix: compile dist from source in Docker, fix ChatResult interface
- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
2026-02-27 19:27:42 -08:00

173 lines
6.8 KiB
JavaScript

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.listSessions = listSessions;
exports.clearSession = clearSession;
exports.orchestratorChat = orchestratorChat;
const llm_1 = require("./llm");
const tools_1 = require("./tools");
const MAX_TURNS = 20;
// In-memory session store, keyed by session id. Lives for the lifetime of
// the process; persistence (if any) is handled by the caller via
// opts.preloadedHistory in orchestratorChat.
const sessions = new Map();

/**
 * Fetch the session for `sessionId`, lazily creating an empty one on first
 * use. Every call — including the creating one — refreshes lastActiveAt.
 *
 * @param {string} sessionId - caller-chosen session identifier
 * @returns {object} mutable session record { id, history, createdAt, lastActiveAt }
 */
function getOrCreateSession(sessionId) {
    const now = new Date().toISOString();
    let record = sessions.get(sessionId);
    if (record === undefined) {
        record = {
            id: sessionId,
            history: [],
            createdAt: now,
            lastActiveAt: now
        };
        sessions.set(sessionId, record);
    }
    record.lastActiveAt = now;
    return record;
}

/**
 * Summarize all live sessions without exposing their message contents.
 *
 * @returns {Array<{id: string, messages: number, createdAt: string, lastActiveAt: string}>}
 */
function listSessions() {
    const summaries = [];
    for (const record of sessions.values()) {
        summaries.push({
            id: record.id,
            messages: record.history.length,
            createdAt: record.createdAt,
            lastActiveAt: record.lastActiveAt
        });
    }
    return summaries;
}

/**
 * Delete a session and its history. No-op when the id is unknown.
 *
 * @param {string} sessionId - id of the session to drop
 */
function clearSession(sessionId) {
    sessions.delete(sessionId);
}
// ---------------------------------------------------------------------------
// Orchestrator system prompt
// ---------------------------------------------------------------------------
// NOTE: every byte of this template literal is sent to the model verbatim.
// The tool names listed here must stay in sync with tools_1.ALL_TOOLS, and
// the protected-repo list in ## Security is the only guard against spawning
// agents on platform repos — edit with care.
const SYSTEM_PROMPT = `You are the Master Orchestrator for Vibn — an AI-powered cloud development platform.
You run continuously and have full awareness of the Vibn project. You can take autonomous action on behalf of the user.
## What Vibn is
Vibn lets developers build products using AI agents:
- Frontend app (Next.js) at vibnai.com
- Backend API at api.vibnai.com
- Agent runner (this system) at agents.vibnai.com
- Cloud IDE (Theia) at theia.vibnai.com
- Self-hosted Git at git.vibnai.com (user: mark)
- Deployments via Coolify at coolify.vibnai.com (server: 34.19.250.135, Montreal)
## Your tools
**Awareness** (understand current state first):
- list_repos — all Git repositories
- list_all_issues — open/in-progress work
- list_all_apps — deployed apps and their status
- get_app_status — health of a specific app
- read_repo_file — read any file from any repo without cloning
**Action** (get things done):
- spawn_agent — dispatch Coder, PM, or Marketing agent on a repo
- get_job_status — check a running agent job
- deploy_app — trigger a Coolify deployment
- gitea_create_issue — track work (label agent:coder/pm/marketing to auto-trigger)
- gitea_list_issues / gitea_close_issue — issue lifecycle
## Specialist agents you can spawn
- **Coder** — writes code, tests, commits, and pushes
- **PM** — docs, issues, sprint tracking
- **Marketing** — copy, release notes, blog posts
## How you work
1. Use awareness tools first if you need current state.
2. Break the task into concrete steps.
3. Spawn the right agent(s) with specific, detailed instructions.
4. Track and report on results.
5. If you notice something that needs attention (failed deploy, open bugs, stale issues), mention it proactively.
## Style
- Direct. No filler.
- Honest about uncertainty.
- When spawning agents, be specific — give them full context, not vague instructions.
- Keep responses concise unless the user needs detail.
## Security
- Never spawn agents on: mark/vibn-frontend, mark/theia-code-os, mark/vibn-agent-runner, mark/vibn-api, mark/master-ai
- Those are protected platform repos — read-only for you, not writable by agents.`;
// ---------------------------------------------------------------------------
// Main orchestrator chat — uses GLM-5 (Tier B) by default
// ---------------------------------------------------------------------------
/**
 * Run one orchestrator conversation turn: append the user message to the
 * session, then loop LLM call -> tool execution until the model answers
 * without tool calls or MAX_TURNS is reached.
 *
 * @param {string} sessionId - session to append to (created if new)
 * @param {string} userMessage - the user's message for this turn
 * @param {object} ctx - tool-execution context; its memoryUpdates array is
 *   returned to the caller verbatim
 * @param {object} [opts] - optional; opts.preloadedHistory seeds a fresh
 *   session from DB-persisted messages
 * @returns {Promise<object>} { reply, reasoning, sessionId, turns, toolCalls,
 *   model, history (last 40 messages), memoryUpdates }
 */
async function orchestratorChat(sessionId, userMessage, ctx, opts) {
    // Tier B = GLM-5 unless overridden via environment.
    const modelId = process.env.ORCHESTRATOR_MODEL ?? 'B';
    const llm = (0, llm_1.createLLM)(modelId, { temperature: 0.3 });
    const session = getOrCreateSession(sessionId);
    // Only a brand-new in-memory session gets seeded from persisted history.
    const preloaded = opts?.preloadedHistory;
    if (preloaded != null && preloaded.length > 0 && session.history.length === 0) {
        session.history = [...preloaded];
    }
    const oaiTools = (0, llm_1.toOAITools)(tools_1.ALL_TOOLS);
    session.history.push({ role: 'user', content: userMessage });
    const toolCallNames = [];
    let finalReply = '';
    let finalReasoning = null;
    let turn = 0;
    // The system prompt is prepended on every request rather than stored in
    // the session history.
    const withSystemPrompt = () => [
        { role: 'system', content: SYSTEM_PROMPT },
        ...session.history
    ];
    while (turn < MAX_TURNS) {
        turn += 1;
        let response = await llm.chat(withSystemPrompt(), oaiTools, 4096);
        // GLM-5 can spend the entire token budget on reasoning (content null,
        // finish_reason 'length'); retry once with double the budget.
        const exhaustedWhileReasoning = response.content === null
            && response.tool_calls.length === 0
            && response.finish_reason === 'length';
        if (exhaustedWhileReasoning) {
            const retry = await llm.chat(withSystemPrompt(), oaiTools, 8192);
            response = { ...response, ...retry };
        }
        // Keep only the latest reasoning trace; it is returned to the caller
        // but never written into history.
        if (response.reasoning) {
            finalReasoning = response.reasoning;
        }
        const hasToolCalls = response.tool_calls.length > 0;
        session.history.push({
            role: 'assistant',
            content: response.content,
            tool_calls: hasToolCalls ? response.tool_calls : undefined
        });
        // A response with no tool calls is the final answer.
        if (!hasToolCalls) {
            finalReply = response.content ?? '';
            break;
        }
        // Execute each requested tool in order, appending results to history
        // so the next LLM call sees them.
        for (const call of response.tool_calls) {
            const name = call.function.name;
            toolCallNames.push(name);
            let args = {};
            try {
                args = JSON.parse(call.function.arguments || '{}');
            }
            catch { /* malformed arguments — best-effort: run with empty args */ }
            let outcome;
            try {
                outcome = await (0, tools_1.executeTool)(name, args, ctx);
            }
            catch (err) {
                outcome = { error: err instanceof Error ? err.message : String(err) };
            }
            session.history.push({
                role: 'tool',
                tool_call_id: call.id,
                name,
                content: typeof outcome === 'string' ? outcome : JSON.stringify(outcome)
            });
        }
    }
    if (turn >= MAX_TURNS && !finalReply) {
        finalReply = 'Hit the turn limit. Try a more specific request.';
    }
    return {
        reply: finalReply,
        reasoning: finalReasoning,
        sessionId,
        turns: turn,
        toolCalls: toolCallNames,
        model: llm.modelId,
        history: session.history.slice(-40),
        memoryUpdates: ctx.memoryUpdates
    };
}