fix: compile dist from source in Docker, fix ChatResult interface

- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
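For context, the repaired return shape is visible in the rebuilt dist/orchestrator.js below. The sketch that follows is only an approximation of the ChatResult interface in orchestrator.ts; the ChatMessage and MemoryUpdate definitions shown here are placeholders, since the actual source types are not part of this diff.

```ts
// Sketch of the return shape produced by orchestratorChat in dist/orchestrator.js.
// ChatMessage and MemoryUpdate are assumed placeholder shapes, not the real types.
interface ChatMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null;
}

interface MemoryUpdate {
  key: string;   // assumed shape
  value: string; // assumed shape
}

interface ChatResult {
  reply: string;
  reasoning: string | null;
  sessionId: string;
  turns: number;
  toolCalls: string[];
  model: string;
  history: ChatMessage[];        // newly added field (last slice of session history)
  memoryUpdates: MemoryUpdate[]; // newly added field (collected from ctx during the run)
}
```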
dist/orchestrator.js (vendored, 198 changed lines)
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.listSessions = listSessions;
 exports.clearSession = clearSession;
 exports.orchestratorChat = orchestratorChat;
-const genai_1 = require("@google/genai");
+const llm_1 = require("./llm");
 const tools_1 = require("./tools");
 const MAX_TURNS = 20;
 const sessions = new Map();
@@ -32,131 +32,141 @@ function clearSession(sessionId) {
     sessions.delete(sessionId);
 }
 // ---------------------------------------------------------------------------
-// Orchestrator system prompt — full Vibn context
+// Orchestrator system prompt
 // ---------------------------------------------------------------------------
 const SYSTEM_PROMPT = `You are the Master Orchestrator for Vibn — an AI-powered cloud development platform.
 
-You are always running. You have full awareness of the Vibn project and can take autonomous action.
+You run continuously and have full awareness of the Vibn project. You can take autonomous action on behalf of the user.
 
 ## What Vibn is
-Vibn is a platform that lets developers build products using AI agents. It includes:
-- A cloud IDE (Theia) at theia.vibnai.com
-- A frontend app (Next.js) at vibnai.com
-- A backend API at api.vibnai.com
-- An agent runner (this system) at agents.vibnai.com
-- Self-hosted Git at git.vibnai.com
-- Self-hosted deployments via Coolify at coolify.vibnai.com
+Vibn lets developers build products using AI agents:
+- Frontend app (Next.js) at vibnai.com
+- Backend API at api.vibnai.com
+- Agent runner (this system) at agents.vibnai.com
+- Cloud IDE (Theia) at theia.vibnai.com
+- Self-hosted Git at git.vibnai.com (user: mark)
+- Deployments via Coolify at coolify.vibnai.com (server: 34.19.250.135, Montreal)
 
-## Your capabilities
-You have access to tools that give you full project control:
+## Your tools
 
-**Awareness tools** (use these to understand current state):
-- list_repos — see all Git repositories
-- list_all_issues — see what work is open or in progress
-- list_all_apps — see all deployed apps and their status
-- get_app_status — check if a specific app is running and healthy
+**Awareness** (understand current state first):
+- list_repos — all Git repositories
+- list_all_issues — open/in-progress work
+- list_all_apps — deployed apps and their status
+- get_app_status — health of a specific app
 - read_repo_file — read any file from any repo without cloning
 
-**Action tools** (use these to get things done):
-- spawn_agent — dispatch Coder, PM, or Marketing agent to do work on a repo
-- get_job_status — check if a spawned agent job is done
-- deploy_app — trigger a Coolify deployment after code is committed
-- gitea_create_issue — create a tracked issue (also triggers agent webhook if labelled)
-- gitea_list_issues, gitea_close_issue — manage issue lifecycle
+**Action** (get things done):
+- spawn_agent — dispatch Coder, PM, or Marketing agent on a repo
+- get_job_status — check a running agent job
+- deploy_app — trigger a Coolify deployment
+- gitea_create_issue — track work (label agent:coder/pm/marketing to auto-trigger)
+- gitea_list_issues / gitea_close_issue — issue lifecycle
 
-## Available agents you can spawn
-- **Coder** — writes code, edits files, runs commands, commits and pushes
-- **PM** — writes documentation, manages issues, creates reports
-- **Marketing** — writes copy, blog posts, release notes
+## Specialist agents you can spawn
+- **Coder** — writes code, tests, commits, and pushes
+- **PM** — docs, issues, sprint tracking
+- **Marketing** — copy, release notes, blog posts
 
 ## How you work
-1. When the user gives you a task, think about what needs to happen.
-2. Use awareness tools first to understand current state if needed.
-3. Break the task into concrete actions.
-4. Spawn the right agents with detailed, specific task descriptions.
-5. Check back on job status if the user wants to track progress.
-6. Report clearly what was done and what's next.
+1. Use awareness tools first if you need current state.
+2. Break the task into concrete steps.
+3. Spawn the right agent(s) with specific, detailed instructions.
+4. Track and report on results.
+5. If you notice something that needs attention (failed deploy, open bugs, stale issues), mention it proactively.
 
-## Your personality
-- Direct and clear. No fluff.
-- Proactive — if you notice something that needs fixing, mention it.
-- Honest about what you can and can't do.
-- You speak for the whole system, not just one agent.
+## Style
+- Direct. No filler.
+- Honest about uncertainty.
+- When spawning agents, be specific — give them full context, not vague instructions.
+- Keep responses concise unless the user needs detail.
 
 ## Important context
 - All repos are owned by "mark" on git.vibnai.com
 - The main repos are: vibn-frontend, vibn-api, vibn-agent-runner, theia-code-os
 - The stack: Next.js (frontend), Node.js (API + agent runner), Theia (IDE)
 - Coolify manages all deployments on server 34.19.250.135 (Montreal)
-- Agent label routing: agent:coder, agent:pm, agent:marketing on Gitea issues`;
-async function orchestratorChat(sessionId, userMessage, ctx) {
-    const apiKey = process.env.GOOGLE_API_KEY;
-    if (!apiKey)
-        throw new Error('GOOGLE_API_KEY not set');
-    const genai = new genai_1.GoogleGenAI({ apiKey });
+## Security
+- Never spawn agents on: mark/vibn-frontend, mark/theia-code-os, mark/vibn-agent-runner, mark/vibn-api, mark/master-ai
+- Those are protected platform repos — read-only for you, not writable by agents.`;
+// ---------------------------------------------------------------------------
+// Main orchestrator chat — uses GLM-5 (Tier B) by default
+// ---------------------------------------------------------------------------
+async function orchestratorChat(sessionId, userMessage, ctx, opts) {
+    const modelId = process.env.ORCHESTRATOR_MODEL ?? 'B'; // Tier B = GLM-5
+    const llm = (0, llm_1.createLLM)(modelId, { temperature: 0.3 });
     const session = getOrCreateSession(sessionId);
-    // Orchestrator gets ALL tools
-    const functionDeclarations = tools_1.ALL_TOOLS.map(t => ({
-        name: t.name,
-        description: t.description,
-        parameters: t.parameters
-    }));
-    // Add user message to history
-    session.history.push({ role: 'user', parts: [{ text: userMessage }] });
+    // Seed session from DB history if provided and session is fresh
+    if (opts?.preloadedHistory && opts.preloadedHistory.length > 0 && session.history.length === 0) {
+        session.history = [...opts.preloadedHistory];
+    }
+    const oaiTools = (0, llm_1.toOAITools)(tools_1.ALL_TOOLS);
+    // Append user message
+    session.history.push({ role: 'user', content: userMessage });
     let turn = 0;
     let finalReply = '';
+    let finalReasoning = null;
    const toolCallNames = [];
+    // Build messages with system prompt prepended
+    const buildMessages = () => [
+        { role: 'system', content: SYSTEM_PROMPT },
+        ...session.history
+    ];
     while (turn < MAX_TURNS) {
         turn++;
-        const response = await genai.models.generateContent({
-            model: 'gemini-2.5-flash',
-            contents: session.history,
-            config: {
-                systemInstruction: SYSTEM_PROMPT,
-                tools: [{ functionDeclarations }],
-                temperature: 0.3,
-                maxOutputTokens: 8192
-            }
-        });
-        const candidate = response.candidates?.[0];
-        if (!candidate)
-            throw new Error('No response from Gemini');
-        const modelContent = {
-            role: 'model',
-            parts: candidate.content?.parts || []
+        const response = await llm.chat(buildMessages(), oaiTools, 4096);
+        // If GLM-5 is still reasoning (content null, finish_reason length) give it more tokens
+        if (response.content === null && response.tool_calls.length === 0 && response.finish_reason === 'length') {
+            // Retry with more tokens — model hit max_tokens during reasoning
+            const retry = await llm.chat(buildMessages(), oaiTools, 8192);
+            Object.assign(response, retry);
+        }
+        // Record reasoning for the final turn (informational, not stored in history)
+        if (response.reasoning)
+            finalReasoning = response.reasoning;
+        // Build assistant message to add to history
+        const assistantMsg = {
+            role: 'assistant',
+            content: response.content,
+            tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
         };
-        session.history.push(modelContent);
-        const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
-        // No more tool calls — we have the final answer
-        if (functionCalls.length === 0) {
-            finalReply = candidate.content?.parts
-                ?.filter(p => p.text)
-                .map(p => p.text)
-                .join('') ?? '';
+        session.history.push(assistantMsg);
+        // No tool calls — we have the final answer
+        if (response.tool_calls.length === 0) {
+            finalReply = response.content ?? '';
             break;
         }
-        // Execute tool calls
-        const toolResultParts = [];
-        for (const part of functionCalls) {
-            const call = part.functionCall;
-            const callName = call.name ?? 'unknown';
-            const callArgs = (call.args ?? {});
-            toolCallNames.push(callName);
+        // Execute each tool call and collect results
+        for (const tc of response.tool_calls) {
+            const fnName = tc.function.name;
+            let fnArgs = {};
+            try {
+                fnArgs = JSON.parse(tc.function.arguments || '{}');
+            }
+            catch { /* bad JSON */ }
+            toolCallNames.push(fnName);
             let result;
             try {
-                result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
+                result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
             }
             catch (err) {
                 result = { error: err instanceof Error ? err.message : String(err) };
             }
-            toolResultParts.push({
-                functionResponse: { name: callName, response: { result } }
+            // Add tool result to history
+            session.history.push({
+                role: 'tool',
+                tool_call_id: tc.id,
+                name: fnName,
+                content: typeof result === 'string' ? result : JSON.stringify(result)
             });
         }
-        session.history.push({ role: 'user', parts: toolResultParts });
     }
     if (turn >= MAX_TURNS && !finalReply) {
-        finalReply = 'I hit the turn limit. Please try a more specific request.';
+        finalReply = 'Hit the turn limit. Try a more specific request.';
     }
-    return { reply: finalReply, sessionId, turns: turn, toolCalls: toolCallNames };
+    return {
+        reply: finalReply,
+        reasoning: finalReasoning,
+        sessionId,
+        turns: turn,
+        toolCalls: toolCallNames,
+        model: llm.modelId,
+        history: session.history.slice(-40),
+        memoryUpdates: ctx.memoryUpdates
+    };
 }
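For illustration only, a minimal sketch of how a caller might consume the new return fields. It relies solely on the return object visible in the diff above; the saveHistory and saveMemory helpers are hypothetical placeholders, not functions from this repo.

```ts
// Hypothetical consumer of the updated orchestratorChat result.
// Field names come from the return object in the diff above; the persistence
// helpers (saveHistory, saveMemory) are placeholders, not real project code.
import { orchestratorChat } from './orchestrator';

async function handleChat(sessionId: string, message: string, ctx: any) {
  const result = await orchestratorChat(sessionId, message, ctx);

  console.log(result.reply);       // final assistant text
  console.log(result.reasoning);   // reasoning from the last turn, or null
  console.log(result.model);       // model id reported by the LLM wrapper
  console.log(result.toolCalls);   // names of tools invoked during the run

  await saveHistory(sessionId, result.history);      // last 40 session messages
  await saveMemory(sessionId, result.memoryUpdates); // collected from ctx

  return result;
}

// Placeholder persistence helpers, only to keep the sketch self-contained.
async function saveHistory(sessionId: string, history: unknown[]): Promise<void> { }
async function saveMemory(sessionId: string, updates: unknown[]): Promise<void> { }
```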