fix: compile dist from source in Docker, fix ChatResult interface

- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
This commit is contained in:
2026-02-27 19:27:42 -08:00
parent 837b6e8b8d
commit d9368e4abd
14 changed files with 675 additions and 259 deletions

View File

@@ -5,12 +5,12 @@ export interface RunResult {
finalText: string;
toolCallCount: number;
turns: number;
model: string;
}
/**
* Core Gemini agent loop.
* Core agent execution loop — model-agnostic via the unified LLM client.
*
* Sends the task to Gemini with the agent's system prompt and tools,
* then loops: execute tool calls → send results back → repeat until
* the model stops calling tools or MAX_TURNS is reached.
* Agents use their configured model tier (A/B/C) or a specific model ID.
* Tool calling uses OpenAI format throughout.
*/
export declare function runAgent(job: Job, config: AgentConfig, task: string, ctx: ToolContext): Promise<RunResult>;

125
dist/agent-runner.js vendored
View File

@@ -1,117 +1,80 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.runAgent = runAgent;
const genai_1 = require("@google/genai");
const llm_1 = require("./llm");
const tools_1 = require("./tools");
const job_store_1 = require("./job-store");
const MAX_TURNS = 40; // safety cap — prevents infinite loops
const MAX_TURNS = 40;
/**
* Core Gemini agent loop.
* Core agent execution loop — model-agnostic via the unified LLM client.
*
* Sends the task to Gemini with the agent's system prompt and tools,
* then loops: execute tool calls → send results back → repeat until
* the model stops calling tools or MAX_TURNS is reached.
* Agents use their configured model tier (A/B/C) or a specific model ID.
* Tool calling uses OpenAI format throughout.
*/
async function runAgent(job, config, task, ctx) {
const apiKey = process.env.GOOGLE_API_KEY;
if (!apiKey) {
throw new Error('GOOGLE_API_KEY environment variable is not set');
}
const genai = new genai_1.GoogleGenAI({ apiKey });
// Build Gemini function declarations from our tool definitions
const functionDeclarations = config.tools.map(tool => ({
name: tool.name,
description: tool.description,
parameters: tool.parameters
}));
const tools = functionDeclarations.length > 0
? [{ functionDeclarations }]
: [];
const model = genai.models;
// Build conversation history
const history = [];
// Initial user message
let currentMessage = {
role: 'user',
parts: [{ text: task }]
};
const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
const oaiTools = (0, llm_1.toOAITools)(config.tools);
const history = [
{ role: 'user', content: task }
];
let toolCallCount = 0;
let turn = 0;
let finalText = '';
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} agent...` });
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} (${llm.modelId})…` });
while (turn < MAX_TURNS) {
turn++;
// Add current message to history
history.push(currentMessage);
// Call Gemini
const response = await model.generateContent({
model: config.model || 'gemini-2.0-flash',
contents: history,
config: {
systemInstruction: config.systemPrompt,
tools: tools.length > 0 ? tools : undefined,
temperature: 0.2,
maxOutputTokens: 8192
}
});
const candidate = response.candidates?.[0];
if (!candidate) {
throw new Error('No response from Gemini');
}
// Add model response to history
const modelContent = {
role: 'model',
parts: candidate.content?.parts || []
const messages = [
{ role: 'system', content: config.systemPrompt },
...history
];
const response = await llm.chat(messages, oaiTools, 8192);
// Build assistant message for history
const assistantMsg = {
role: 'assistant',
content: response.content,
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
};
history.push(modelContent);
// Extract function calls from the response
const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
if (functionCalls.length === 0) {
// No tool calls — the agent is done
finalText = candidate.content?.parts
?.filter(p => p.text)
.map(p => p.text)
.join('') ?? '';
history.push(assistantMsg);
// No tool calls — agent is done
if (response.tool_calls.length === 0) {
finalText = response.content ?? '';
break;
}
// Execute all tool calls
const toolResultParts = [];
for (const part of functionCalls) {
const call = part.functionCall;
const callName = call.name ?? 'unknown';
const callArgs = (call.args ?? {});
// Execute tool calls
for (const tc of response.tool_calls) {
const fnName = tc.function.name;
let fnArgs = {};
try {
fnArgs = JSON.parse(tc.function.arguments || '{}');
}
catch { /* bad JSON */ }
toolCallCount++;
(0, job_store_1.updateJob)(job.id, {
progress: `Turn ${turn}: calling ${callName}...`,
progress: `Turn ${turn}: calling ${fnName}`,
toolCalls: [...(job.toolCalls || []), {
turn,
tool: callName,
args: callArgs,
tool: fnName,
args: fnArgs,
timestamp: new Date().toISOString()
}]
});
let result;
try {
result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
}
catch (err) {
result = { error: err instanceof Error ? err.message : String(err) };
}
toolResultParts.push({
functionResponse: {
name: callName,
response: { result }
}
history.push({
role: 'tool',
tool_call_id: tc.id,
name: fnName,
content: typeof result === 'string' ? result : JSON.stringify(result)
});
}
// Next turn: send tool results back to the model
currentMessage = {
role: 'user',
parts: toolResultParts
};
}
if (turn >= MAX_TURNS && !finalText) {
finalText = `Agent reached the ${MAX_TURNS}-turn safety limit. Last tool call count: ${toolCallCount}.`;
finalText = `Agent hit the ${MAX_TURNS}-turn safety limit. Tool calls made: ${toolCallCount}.`;
}
return { finalText, toolCallCount, turns: turn };
return { finalText, toolCallCount, turns: turn, model: llm.modelId };
}

124
dist/agents.js vendored
View File

@@ -13,108 +13,104 @@ function pick(names) {
}
// ---------------------------------------------------------------------------
// Agent definitions
//
// model is a tier ('A' | 'B' | 'C') or a specific model ID.
// Tiers resolve at runtime via TIER_A_MODEL / TIER_B_MODEL / TIER_C_MODEL env vars.
//
// Tier A = gemini-2.5-flash — fast, cheap: routing, summaries, monitoring
// Tier B = zai-org/glm-5-maas — workhorse coding model
// Tier C = zai-org/glm-5-maas — complex decisions (or Claude Sonnet via TIER_C_MODEL)
// ---------------------------------------------------------------------------
exports.AGENTS = {
Orchestrator: {
name: 'Orchestrator',
description: 'Master coordinator that breaks down high-level goals and delegates to specialist agents',
model: 'gemini-2.5-flash',
systemPrompt: `You are the Orchestrator for Vibn, an autonomous AI system for software development.
description: 'Master coordinator breaks down goals and delegates to specialist agents',
model: 'B', // GLM-5 — good planner, chain-of-thought reasoning
systemPrompt: `You are the Orchestrator for Vibn, an autonomous AI platform for software development.
Your role is to:
1. Understand the high-level goal provided in the task.
2. Break it down into concrete sub-tasks.
3. Delegate sub-tasks to the appropriate specialist agents using the spawn_agent tool.
4. Use Gitea to track progress: create an issue at the start, close it when done.
5. Summarize what was done when complete.
Your role:
1. Understand the high-level goal.
2. Break it into concrete sub-tasks.
3. Delegate to the right specialist agents via spawn_agent.
4. Track progress via Gitea issues.
5. Summarize results when done.
Available specialist agents and when to use them:
- **Coder**: Any code changes — features, bug fixes, refactors, tests.
- **PM**: Project management — issue triage, sprint planning, documentation updates.
- **Marketing**: Content and copy — blog posts, landing page copy, release notes.
Agents available:
- Coder: code changes, features, bug fixes, tests.
- PM: issue triage, docs, sprint planning.
- Marketing: copy, blog posts, release notes.
Rules:
- Always create a Gitea issue first to track the work.
- Delegate to ONE agent at a time unless tasks are fully independent.
- Check back on progress by listing issues.
- Never try to write code yourself — delegate to Coder.
- Be concise in your task descriptions when spawning agents.`,
- Create a Gitea issue first to track the work.
- Delegate one agent at a time unless tasks are fully independent.
- Never write code yourself — delegate to Coder.
- Be specific in task descriptions when spawning agents.`,
tools: pick([...GITEA_TOOLS, ...SPAWN_TOOL, ...COOLIFY_TOOLS])
},
Coder: {
name: 'Coder',
description: 'Senior software engineer — writes, edits, and tests code. Commits and pushes when done.',
model: 'gemini-2.5-flash',
systemPrompt: `You are an expert senior software engineer working autonomously on a git repository.
description: 'Senior software engineer — writes, edits, tests, commits, and pushes code',
model: 'B', // GLM-5 — strong at code generation and diffs
systemPrompt: `You are an expert senior software engineer working autonomously on a Git repository.
Your job is to complete the coding task given to you. Follow these rules:
**Workflow:**
1. Start by exploring the codebase: list_directory, find_files, read_file to understand structure.
2. Search for relevant code: search_code to find existing patterns.
Workflow:
1. Explore the codebase: list_directory, find_files, read_file.
2. Search for patterns: search_code.
3. Plan your changes before making them.
4. Read every file BEFORE editing it.
5. Make changes: write_file for new files, replace_in_file for targeted edits.
6. Run tests or lint if applicable: execute_command.
7. Commit and push when the task is complete: git_commit_and_push.
6. Run tests/lint if applicable: execute_command.
7. Commit and push when complete: git_commit_and_push.
**Code quality rules:**
- Match existing code style exactly.
- Never leave TODO comments — implement or skip.
Code quality:
- Match existing style exactly.
- No TODO comments — implement or skip.
- Write complete files, not partial snippets.
- If tests exist, run them and fix failures before committing.
- Commit message should be concise and in imperative mood (e.g. "add user authentication").
- Run tests and fix failures before committing.
- Commit messages: imperative mood, concise (e.g. "add user authentication").
**Safety rules:**
- Never delete files unless explicitly instructed.
- Never modify .env files or credentials.
Safety:
- Never delete files unless explicitly told to.
- Never touch .env files or credentials.
- Never commit secrets or API keys.
**If you were triggered by a Gitea issue:**
- After committing, close the issue using gitea_close_issue.
- The repo name is in the format "owner/name".
Be methodical. Read before you write. Test before you commit.`,
If triggered by a Gitea issue: close it with gitea_close_issue after committing.`,
tools: pick([...FILE_TOOLS, ...SHELL_TOOLS, ...GIT_TOOLS, ...GITEA_TOOLS])
},
PM: {
name: 'PM',
description: 'Product manager — manages Gitea issues, writes documentation, tracks project health',
model: 'gemini-2.5-flash',
description: 'Product manager — docs, issue management, project health reports',
model: 'A', // Gemini Flash — lightweight, cheap for docs/issue work
systemPrompt: `You are an autonomous Product Manager for a software project hosted on Gitea.
Your responsibilities:
1. Create, update, and close Gitea issues to track work.
2. Write and update documentation files in the repository.
Responsibilities:
1. Create, update, and close Gitea issues.
2. Write and update docs in the repository.
3. Summarize project state and create reports.
4. Prioritize and triage bugs/features based on impact.
4. Triage bugs and features by impact.
When writing documentation:
- Be clear and concise.
- Use markdown formatting.
- Focus on what users and developers need to know.
- Keep docs up to date with the actual codebase state.
Always commit documentation updates after writing them.`,
When writing docs:
- Clear and concise.
- Markdown formatting.
- Keep docs in sync with the codebase.
- Always commit after writing.`,
tools: pick([...GITEA_TOOLS, ...FILE_TOOLS, ...GIT_TOOLS])
},
Marketing: {
name: 'Marketing',
description: 'Marketing specialist — writes copy, blog posts, release notes, and landing page content',
model: 'gemini-2.5-flash',
description: 'Marketing specialist — copy, blog posts, release notes, landing page content',
model: 'A', // Gemini Flash — cheap for content generation
systemPrompt: `You are an autonomous Marketing specialist for a SaaS product called Vibn.
Vibn is a cloud-based AI-powered development environment. It helps development teams build faster with AI agents that can write code, manage projects, and deploy automatically.
Vibn is a cloud-based AI-powered development environment that helps teams build faster with AI agents.
Your responsibilities:
1. Write compelling marketing copy for landing pages, email campaigns, and social media.
2. Write technical blog posts that explain features in an accessible way.
Responsibilities:
1. Write landing page copy, emails, and social media content.
2. Write technical blog posts explaining features accessibly.
3. Write release notes that highlight user-facing value.
4. Ensure all copy is on-brand: professional, clear, forward-thinking, and developer-friendly.
4. Maintain brand voice: smart, confident, practical. No hype, no jargon.
Brand voice: Smart, confident, practical. No hype. No jargon. Show don't tell.
When writing content, create actual files in the repository (e.g. blog/2026-02-release.md) and commit them.`,
Always create real files in the repo (e.g. blog/2026-02-release.md) and commit them.`,
tools: pick([...FILE_TOOLS, ...GIT_TOOLS])
}
};

67
dist/llm.d.ts vendored Normal file
View File

@@ -0,0 +1,67 @@
/**
 * One message in an OpenAI-style chat transcript.
 * `content` is nullable — e.g. an assistant turn may carry only tool calls.
 */
export interface LLMMessage {
role: 'system' | 'user' | 'assistant' | 'tool';
content: string | null;
tool_calls?: LLMToolCall[];
tool_call_id?: string;
name?: string;
}
/** A single tool invocation requested by the model (OpenAI wire format). */
export interface LLMToolCall {
id: string;
type: 'function';
function: {
name: string;
/** JSON-encoded argument object, as produced by the model. */
arguments: string;
};
}
/** A tool definition advertised to the model (OpenAI wire format). */
export interface LLMTool {
type: 'function';
function: {
name: string;
description: string;
/** JSON-Schema-style parameter description. */
parameters: Record<string, unknown>;
};
}
/** Normalized result of one chat completion, regardless of backend. */
export interface LLMResponse {
content: string | null;
/** Model "thinking" text when the backend exposes it; otherwise null. */
reasoning: string | null;
tool_calls: LLMToolCall[];
finish_reason: string;
/** Token accounting — optional; not all backends report it. */
usage?: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}
/** Minimal backend-agnostic chat interface implemented by all clients. */
export interface LLMClient {
modelId: string;
chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
}
/** Client for models served through Vertex AI's OpenAI-compatible endpoint. */
export declare class VertexOpenAIClient implements LLMClient {
modelId: string;
private projectId;
private region;
private temperature;
constructor(modelId: string, opts?: {
projectId?: string;
region?: string;
temperature?: number;
});
chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
}
/** Client for Gemini models via the @google/genai SDK (OpenAI format in/out). */
export declare class GeminiClient implements LLMClient {
modelId: string;
private temperature;
constructor(modelId?: string, opts?: {
temperature?: number;
});
chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
}
/** Cost/capability tier letter; resolved to a model ID at runtime. */
export type ModelTier = 'A' | 'B' | 'C';
/** Factory: accepts a tier letter or an explicit model ID and picks a client. */
export declare function createLLM(modelOrTier: string | ModelTier, opts?: {
temperature?: number;
}): LLMClient;
/** Convert the project's internal tool definitions to OpenAI tool format. */
export declare function toOAITools(tools: Array<{
name: string;
description: string;
parameters: Record<string, unknown>;
}>): LLMTool[];

197
dist/llm.js vendored Normal file
View File

@@ -0,0 +1,197 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GeminiClient = exports.VertexOpenAIClient = void 0;
exports.createLLM = createLLM;
exports.toOAITools = toOAITools;
const child_process_1 = require("child_process");
const genai_1 = require("@google/genai");
const uuid_1 = require("uuid");
// ---------------------------------------------------------------------------
// Vertex AI OpenAI-compatible client
// Used for: zai-org/glm-5-maas, anthropic/claude-sonnet-4-6, etc.
// ---------------------------------------------------------------------------
let _cachedToken = '';
let _tokenExpiry = 0;
/**
 * Return a gcloud access token for Vertex calls, cached module-wide.
 * Refreshes via the gcloud CLI once the cached token goes stale.
 */
function getVertexToken() {
    const nowMs = Date.now();
    const cacheIsFresh = _cachedToken !== '' && nowMs < _tokenExpiry;
    if (!cacheIsFresh) {
        const raw = (0, child_process_1.execSync)('gcloud auth print-access-token', { encoding: 'utf8' });
        _cachedToken = raw.trim();
        // Tokens last ~1 hour; refresh 5 minutes early to stay safe.
        _tokenExpiry = nowMs + 55 * 60 * 1000;
    }
    return _cachedToken;
}
/**
 * Chat client for Vertex AI's OpenAI-compatible completions endpoint.
 * Speaks the OpenAI wire format end-to-end; auth via gcloud access tokens.
 */
class VertexOpenAIClient {
    constructor(modelId, opts) {
        this.modelId = modelId;
        this.projectId = opts?.projectId ?? process.env.GCP_PROJECT_ID ?? 'master-ai-484822';
        this.region = opts?.region ?? 'global';
        this.temperature = opts?.temperature ?? 0.3;
    }
    async chat(messages, tools, maxTokens = 4096) {
        // The "global" region has no regional host prefix.
        const host = this.region === 'global'
            ? 'https://aiplatform.googleapis.com'
            : `https://${this.region}-aiplatform.googleapis.com`;
        const endpoint = `${host}/v1/projects/${this.projectId}/locations/${this.region}/endpoints/openapi/chat/completions`;
        const payload = {
            model: this.modelId,
            messages,
            max_tokens: maxTokens,
            temperature: this.temperature,
            stream: false
        };
        if (tools && tools.length > 0) {
            payload.tools = tools;
            payload.tool_choice = 'auto';
        }
        const res = await fetch(endpoint, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${getVertexToken()}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(payload)
        });
        if (!res.ok) {
            const errText = await res.text();
            if (res.status === 401)
                _tokenExpiry = 0; // force a token refresh on the next call
            throw new Error(`Vertex API ${res.status}: ${errText.slice(0, 400)}`);
        }
        const data = await res.json();
        const firstChoice = data.choices?.[0];
        const msg = firstChoice?.message ?? {};
        return {
            content: msg.content ?? null,
            reasoning: msg.reasoning_content ?? null,
            tool_calls: msg.tool_calls ?? [],
            finish_reason: firstChoice?.finish_reason ?? 'stop',
            usage: data.usage
        };
    }
}
exports.VertexOpenAIClient = VertexOpenAIClient;
// ---------------------------------------------------------------------------
// Gemini client via @google/genai SDK
// Used for: Tier A (fast/cheap routing, summaries, log parsing)
// Converts to/from OpenAI message format internally.
// ---------------------------------------------------------------------------
/**
 * Chat client for Gemini via the @google/genai SDK.
 * Accepts/returns OpenAI-format messages; translation happens internally.
 */
class GeminiClient {
    constructor(modelId = 'gemini-2.5-flash', opts) {
        this.modelId = modelId;
        this.temperature = opts?.temperature ?? 0.2;
    }
    async chat(messages, tools, maxTokens = 8192) {
        const apiKey = process.env.GOOGLE_API_KEY;
        if (!apiKey)
            throw new Error('GOOGLE_API_KEY not set');
        const genai = new genai_1.GoogleGenAI({ apiKey });
        // Gemini takes the system prompt out-of-band; split it from the turns.
        const systemMsg = messages.find(m => m.role === 'system');
        const conversation = messages.filter(m => m.role !== 'system');
        const functionDeclarations = (tools ?? []).map(t => ({
            name: t.function.name,
            description: t.function.description,
            parameters: t.function.parameters
        }));
        const response = await genai.models.generateContent({
            model: this.modelId,
            contents: toGeminiContents(conversation),
            config: {
                systemInstruction: systemMsg?.content ?? undefined,
                tools: functionDeclarations.length > 0 ? [{ functionDeclarations }] : undefined,
                temperature: this.temperature,
                maxOutputTokens: maxTokens
            }
        });
        const candidate = response.candidates?.[0];
        if (!candidate)
            throw new Error('No response from Gemini');
        const parts = candidate.content?.parts ?? [];
        const textContent = parts.filter(p => p.text).map(p => p.text).join('') || null;
        const fnCallParts = parts.filter(p => p.functionCall);
        // Gemini has no call IDs — synthesize OpenAI-style ones.
        const tool_calls = fnCallParts.map(p => ({
            id: `call_${(0, uuid_1.v4)().replace(/-/g, '').slice(0, 12)}`,
            type: 'function',
            function: {
                name: p.functionCall.name ?? '',
                arguments: JSON.stringify(p.functionCall.args ?? {})
            }
        }));
        return {
            content: textContent,
            reasoning: null,
            tool_calls,
            finish_reason: fnCallParts.length > 0 ? 'tool_calls' : 'stop'
        };
    }
}
exports.GeminiClient = GeminiClient;
/**
 * Convert an OpenAI-format message list to Gemini `Content[]`.
 * - assistant → role 'model' (text + functionCall parts)
 * - tool      → role 'user' with a functionResponse part
 * - user      → role 'user' with a text part
 */
function toGeminiContents(messages) {
    return messages.map(msg => {
        switch (msg.role) {
            case 'assistant': {
                const parts = [];
                if (msg.content)
                    parts.push({ text: msg.content });
                for (const call of msg.tool_calls ?? []) {
                    parts.push({
                        functionCall: {
                            name: call.function.name,
                            args: JSON.parse(call.function.arguments || '{}')
                        }
                    });
                }
                return { role: 'model', parts };
            }
            case 'tool': {
                // Tool output may be JSON or plain text; pass strings through.
                let resultValue = msg.content;
                try {
                    resultValue = JSON.parse(msg.content ?? 'null');
                }
                catch { /* not JSON — keep the raw string */ }
                return {
                    role: 'user',
                    parts: [{
                            functionResponse: {
                                name: msg.name ?? 'tool',
                                response: { result: resultValue }
                            }
                        }]
                };
            }
            default:
                return { role: 'user', parts: [{ text: msg.content ?? '' }] };
        }
    });
}
// Tier letters resolve to concrete model IDs via env vars, with defaults.
const TIER_MODELS = {
    A: process.env.TIER_A_MODEL ?? 'gemini-2.5-flash',
    B: process.env.TIER_B_MODEL ?? 'zai-org/glm-5-maas',
    C: process.env.TIER_C_MODEL ?? 'zai-org/glm-5-maas'
};
/**
 * Build an LLM client for a tier letter ('A'|'B'|'C') or explicit model ID.
 * Gemini IDs go through the Gemini SDK; everything else through Vertex.
 */
function createLLM(modelOrTier, opts) {
    const isTier = modelOrTier === 'A' || modelOrTier === 'B' || modelOrTier === 'C';
    const modelId = isTier ? TIER_MODELS[modelOrTier] : modelOrTier;
    return modelId.startsWith('gemini-')
        ? new GeminiClient(modelId, opts)
        : new VertexOpenAIClient(modelId, { temperature: opts?.temperature });
}
// ---------------------------------------------------------------------------
// Helper — convert our ToolDefinition[] → LLMTool[] (OpenAI format)
// ---------------------------------------------------------------------------
function toOAITools(tools) {
    const wrap = (def) => ({
        type: 'function',
        function: {
            name: def.name,
            description: def.description,
            parameters: def.parameters
        }
    });
    return tools.map(wrap);
}

View File

@@ -1,4 +1,5 @@
import { ToolContext } from './tools';
import { LLMMessage } from './llm';
import { ToolContext, MemoryUpdate } from './tools';
export declare function listSessions(): {
id: string;
messages: number;
@@ -6,14 +7,21 @@ export declare function listSessions(): {
lastActiveAt: string;
}[];
export declare function clearSession(sessionId: string): void;
export interface ChatMessage {
role: 'user' | 'assistant';
content: string;
}
export interface ChatResult {
reply: string;
reasoning: string | null;
sessionId: string;
turns: number;
toolCalls: string[];
model: string;
/** Updated conversation history — caller should persist this */
history: LLMMessage[];
/** Knowledge items the AI chose to save this turn */
memoryUpdates: MemoryUpdate[];
}
export declare function orchestratorChat(sessionId: string, userMessage: string, ctx: ToolContext): Promise<ChatResult>;
export declare function orchestratorChat(sessionId: string, userMessage: string, ctx: ToolContext, opts?: {
/** Pre-load history from DB — replaces in-memory session history */
preloadedHistory?: LLMMessage[];
/** Knowledge items to inject as context at start of conversation */
knowledgeContext?: string;
}): Promise<ChatResult>;

198
dist/orchestrator.js vendored
View File

@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
exports.listSessions = listSessions;
exports.clearSession = clearSession;
exports.orchestratorChat = orchestratorChat;
const genai_1 = require("@google/genai");
const llm_1 = require("./llm");
const tools_1 = require("./tools");
const MAX_TURNS = 20;
const sessions = new Map();
@@ -32,131 +32,141 @@ function clearSession(sessionId) {
sessions.delete(sessionId);
}
// ---------------------------------------------------------------------------
// Orchestrator system prompt — full Vibn context
// Orchestrator system prompt
// ---------------------------------------------------------------------------
const SYSTEM_PROMPT = `You are the Master Orchestrator for Vibn — an AI-powered cloud development platform.
You are always running. You have full awareness of the Vibn project and can take autonomous action.
You run continuously and have full awareness of the Vibn project. You can take autonomous action on behalf of the user.
## What Vibn is
Vibn is a platform that lets developers build products using AI agents. It includes:
- A cloud IDE (Theia) at theia.vibnai.com
- A frontend app (Next.js) at vibnai.com
- A backend API at api.vibnai.com
- An agent runner (this system) at agents.vibnai.com
- Self-hosted Git at git.vibnai.com
- Self-hosted deployments via Coolify at coolify.vibnai.com
Vibn lets developers build products using AI agents:
- Frontend app (Next.js) at vibnai.com
- Backend API at api.vibnai.com
- Agent runner (this system) at agents.vibnai.com
- Cloud IDE (Theia) at theia.vibnai.com
- Self-hosted Git at git.vibnai.com (user: mark)
- Deployments via Coolify at coolify.vibnai.com (server: 34.19.250.135, Montreal)
## Your capabilities
You have access to tools that give you full project control:
## Your tools
**Awareness tools** (use these to understand current state):
- list_repos — see all Git repositories
- list_all_issues — see what work is open or in progress
- list_all_apps — see all deployed apps and their status
- get_app_status — check if a specific app is running and healthy
**Awareness** (understand current state first):
- list_repos — all Git repositories
- list_all_issues — open/in-progress work
- list_all_apps — deployed apps and their status
- get_app_status — health of a specific app
- read_repo_file — read any file from any repo without cloning
**Action tools** (use these to get things done):
- spawn_agent — dispatch Coder, PM, or Marketing agent to do work on a repo
- get_job_status — check if a spawned agent job is done
- deploy_app — trigger a Coolify deployment after code is committed
- gitea_create_issue — create a tracked issue (also triggers agent webhook if labelled)
- gitea_list_issues, gitea_close_issue — manage issue lifecycle
**Action** (get things done):
- spawn_agent — dispatch Coder, PM, or Marketing agent on a repo
- get_job_status — check a running agent job
- deploy_app — trigger a Coolify deployment
- gitea_create_issue — track work (label agent:coder/pm/marketing to auto-trigger)
- gitea_list_issues / gitea_close_issue — issue lifecycle
## Available agents you can spawn
- **Coder** — writes code, edits files, runs commands, commits and pushes
- **PM** — writes documentation, manages issues, creates reports
- **Marketing** — writes copy, blog posts, release notes
## Specialist agents you can spawn
- **Coder** — writes code, tests, commits, and pushes
- **PM** — docs, issues, sprint tracking
- **Marketing** — copy, release notes, blog posts
## How you work
1. When the user gives you a task, think about what needs to happen.
2. Use awareness tools first to understand current state if needed.
3. Break the task into concrete actions.
4. Spawn the right agents with detailed, specific task descriptions.
5. Check back on job status if the user wants to track progress.
6. Report clearly what was done and what's next.
1. Use awareness tools first if you need current state.
2. Break the task into concrete steps.
3. Spawn the right agent(s) with specific, detailed instructions.
4. Track and report on results.
5. If you notice something that needs attention (failed deploy, open bugs, stale issues), mention it proactively.
## Your personality
- Direct and clear. No fluff.
- Proactive — if you notice something that needs fixing, mention it.
- Honest about what you can and can't do.
- You speak for the whole system, not just one agent.
## Style
- Direct. No filler.
- Honest about uncertainty.
- When spawning agents, be specific — give them full context, not vague instructions.
- Keep responses concise unless the user needs detail.
## Important context
- All repos are owned by "mark" on git.vibnai.com
- The main repos are: vibn-frontend, vibn-api, vibn-agent-runner, theia-code-os
- The stack: Next.js (frontend), Node.js (API + agent runner), Theia (IDE)
- Coolify manages all deployments on server 34.19.250.135 (Montreal)
- Agent label routing: agent:coder, agent:pm, agent:marketing on Gitea issues`;
async function orchestratorChat(sessionId, userMessage, ctx) {
const apiKey = process.env.GOOGLE_API_KEY;
if (!apiKey)
throw new Error('GOOGLE_API_KEY not set');
const genai = new genai_1.GoogleGenAI({ apiKey });
## Security
- Never spawn agents on: mark/vibn-frontend, mark/theia-code-os, mark/vibn-agent-runner, mark/vibn-api, mark/master-ai
- Those are protected platform repos — read-only for you, not writable by agents.`;
// ---------------------------------------------------------------------------
// Main orchestrator chat — uses GLM-5 (Tier B) by default
// ---------------------------------------------------------------------------
async function orchestratorChat(sessionId, userMessage, ctx, opts) {
const modelId = process.env.ORCHESTRATOR_MODEL ?? 'B'; // Tier B = GLM-5
const llm = (0, llm_1.createLLM)(modelId, { temperature: 0.3 });
const session = getOrCreateSession(sessionId);
// Orchestrator gets ALL tools
const functionDeclarations = tools_1.ALL_TOOLS.map(t => ({
name: t.name,
description: t.description,
parameters: t.parameters
}));
// Add user message to history
session.history.push({ role: 'user', parts: [{ text: userMessage }] });
// Seed session from DB history if provided and session is fresh
if (opts?.preloadedHistory && opts.preloadedHistory.length > 0 && session.history.length === 0) {
session.history = [...opts.preloadedHistory];
}
const oaiTools = (0, llm_1.toOAITools)(tools_1.ALL_TOOLS);
// Append user message
session.history.push({ role: 'user', content: userMessage });
let turn = 0;
let finalReply = '';
let finalReasoning = null;
const toolCallNames = [];
// Build messages with system prompt prepended
const buildMessages = () => [
{ role: 'system', content: SYSTEM_PROMPT },
...session.history
];
while (turn < MAX_TURNS) {
turn++;
const response = await genai.models.generateContent({
model: 'gemini-2.5-flash',
contents: session.history,
config: {
systemInstruction: SYSTEM_PROMPT,
tools: [{ functionDeclarations }],
temperature: 0.3,
maxOutputTokens: 8192
}
});
const candidate = response.candidates?.[0];
if (!candidate)
throw new Error('No response from Gemini');
const modelContent = {
role: 'model',
parts: candidate.content?.parts || []
const response = await llm.chat(buildMessages(), oaiTools, 4096);
// If GLM-5 is still reasoning (content null, finish_reason length) give it more tokens
if (response.content === null && response.tool_calls.length === 0 && response.finish_reason === 'length') {
// Retry with more tokens — model hit max_tokens during reasoning
const retry = await llm.chat(buildMessages(), oaiTools, 8192);
Object.assign(response, retry);
}
// Record reasoning for the final turn (informational, not stored in history)
if (response.reasoning)
finalReasoning = response.reasoning;
// Build assistant message to add to history
const assistantMsg = {
role: 'assistant',
content: response.content,
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
};
session.history.push(modelContent);
const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
// No more tool calls — we have the final answer
if (functionCalls.length === 0) {
finalReply = candidate.content?.parts
?.filter(p => p.text)
.map(p => p.text)
.join('') ?? '';
session.history.push(assistantMsg);
// No tool calls — we have the final answer
if (response.tool_calls.length === 0) {
finalReply = response.content ?? '';
break;
}
// Execute tool calls
const toolResultParts = [];
for (const part of functionCalls) {
const call = part.functionCall;
const callName = call.name ?? 'unknown';
const callArgs = (call.args ?? {});
toolCallNames.push(callName);
// Execute each tool call and collect results
for (const tc of response.tool_calls) {
const fnName = tc.function.name;
let fnArgs = {};
try {
fnArgs = JSON.parse(tc.function.arguments || '{}');
}
catch { /* bad JSON */ }
toolCallNames.push(fnName);
let result;
try {
result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
}
catch (err) {
result = { error: err instanceof Error ? err.message : String(err) };
}
toolResultParts.push({
functionResponse: { name: callName, response: { result } }
// Add tool result to history
session.history.push({
role: 'tool',
tool_call_id: tc.id,
name: fnName,
content: typeof result === 'string' ? result : JSON.stringify(result)
});
}
session.history.push({ role: 'user', parts: toolResultParts });
}
if (turn >= MAX_TURNS && !finalReply) {
finalReply = 'I hit the turn limit. Please try a more specific request.';
finalReply = 'Hit the turn limit. Try a more specific request.';
}
return { reply: finalReply, sessionId, turns: turn, toolCalls: toolCallNames };
return {
reply: finalReply,
reasoning: finalReasoning,
sessionId,
turns: turn,
toolCalls: toolCallNames,
model: llm.modelId,
history: session.history.slice(-40),
memoryUpdates: ctx.memoryUpdates
};
}

38
dist/server.js vendored
View File

@@ -46,8 +46,17 @@ const job_store_1 = require("./job-store");
const agent_runner_1 = require("./agent-runner");
const agents_1 = require("./agents");
const orchestrator_1 = require("./orchestrator");
// Protected Vibn platform repos — agents cannot clone or work in these workspaces.
// NOTE(review): this list is duplicated in dist/tools.js (PROTECTED_GITEA_REPOS);
// keep both in sync, or hoist into a shared module — TODO confirm with maintainer.
const PROTECTED_GITEA_REPOS = new Set([
    'mark/vibn-frontend',
    'mark/theia-code-os',
    'mark/vibn-agent-runner',
    'mark/vibn-api',
    'mark/master-ai',
]);
const app = (0, express_1.default)();
app.use((0, cors_1.default)());
const startTime = new Date();
// Raw body capture for webhook HMAC — must come before express.json()
app.use('/webhook/gitea', express_1.default.raw({ type: '*/*' }));
app.use(express_1.default.json());
@@ -62,6 +71,10 @@ function ensureWorkspace(repo) {
fs.mkdirSync(dir, { recursive: true });
return dir;
}
if (PROTECTED_GITEA_REPOS.has(repo)) {
throw new Error(`SECURITY: Repo "${repo}" is a protected Vibn platform repo. ` +
`Agents cannot clone or work in this workspace.`);
}
const dir = path.join(base, repo.replace('/', '_'));
const gitea = {
apiUrl: process.env.GITEA_API_URL || '',
@@ -95,7 +108,8 @@ function buildContext(repo) {
coolify: {
apiUrl: process.env.COOLIFY_API_URL || '',
apiToken: process.env.COOLIFY_API_TOKEN || ''
}
},
memoryUpdates: []
};
}
// ---------------------------------------------------------------------------
@@ -114,6 +128,28 @@ app.get('/api/agents', (_req, res) => {
}));
res.json(agents);
});
// GET /api/status — server uptime, known agent names, and job counts by status.
app.get('/api/status', (_req, res) => {
    const jobs = (0, job_store_1.listJobs)(Infinity);
    // Seed every known status with 0 so the response shape is stable.
    const by_status = {
        queued: 0,
        running: 0,
        completed: 0,
        failed: 0,
    };
    jobs.forEach((job) => {
        by_status[job.status] = (by_status[job.status] || 0) + 1;
    });
    res.json({
        total_jobs: jobs.length,
        by_status,
        // Whole seconds since the module-level startTime was captured.
        uptime_seconds: Math.floor((Date.now() - startTime.getTime()) / 1000),
        agents: Object.values(agents_1.AGENTS).map(a => a.name),
    });
});
// Submit a new job
app.post('/api/agent/run', async (req, res) => {
const { agent: agentName, task, repo } = req.body;

1
dist/test.d.ts vendored Normal file
View File

@@ -0,0 +1 @@
export {};

13
dist/test.js vendored Normal file
View File

@@ -0,0 +1,13 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const assert_1 = __importDefault(require("assert"));
function add(a, b) {
return a + b;
}
assert_1.default.strictEqual(add(1, 2), 3, 'add(1, 2) should be 3');
assert_1.default.strictEqual(add(0, 0), 0, 'add(0, 0) should be 0');
assert_1.default.strictEqual(add(-1, 1), 0, 'add(-1, 1) should be 0');
console.log('All tests passed!');

7
dist/tools.d.ts vendored
View File

@@ -1,3 +1,8 @@
/**
 * One project fact captured via the `save_memory` tool during an agent run.
 * Accumulated on `ToolContext.memoryUpdates`; presumably persisted by the
 * caller after the turn completes — TODO confirm against the orchestrator.
 */
export interface MemoryUpdate {
    /** Short unique label, e.g. "primary_language" or "auth_strategy". */
    key: string;
    /**
     * Memory category. The save_memory tool schema restricts this to
     * tech_stack | decision | feature | goal | constraint | note, but the
     * interface does not enforce it — confirm before tightening the type.
     */
    type: string;
    /** The fact to remember (1-3 sentences). */
    value: string;
}
export interface ToolContext {
workspaceRoot: string;
gitea: {
@@ -9,6 +14,8 @@ export interface ToolContext {
apiUrl: string;
apiToken: string;
};
/** Accumulated memory updates from save_memory tool calls in this turn */
memoryUpdates: MemoryUpdate[];
}
export interface ToolDefinition {
name: string;

117
dist/tools.js vendored
View File

@@ -41,6 +41,45 @@ const cp = __importStar(require("child_process"));
const util = __importStar(require("util"));
const minimatch_1 = require("minimatch");
const execAsync = util.promisify(cp.exec);
// =============================================================================
// SECURITY GUARDRAILS — Protected VIBN Platform Resources
//
// These repos and Coolify resources belong to the Vibn platform itself.
// Agents must never be allowed to push code or trigger deployments here.
// Read-only operations (list, read file, get status) are still permitted
// so agents can observe the platform state, but all mutations are blocked.
// =============================================================================
/** Gitea repos that agents can NEVER push to, commit to, or write issues on. */
const PROTECTED_GITEA_REPOS = new Set([
    'mark/vibn-frontend',
    'mark/theia-code-os',
    'mark/vibn-agent-runner',
    'mark/vibn-api',
    'mark/master-ai',
]);
/** Coolify project UUID for the VIBN platform — agents cannot deploy here. */
const PROTECTED_COOLIFY_PROJECT = 'f4owwggokksgw0ogo0844os0';
/**
 * Specific Coolify app UUIDs that must never be deployed by an agent.
 * Belt-and-suspenders in case the project UUID filter is bypassed.
 */
const PROTECTED_COOLIFY_APPS = new Set([
    'y4cscsc8s08c8808go0448s0', // vibn-frontend
    'kggs4ogckc0w8ggwkkk88kck', // vibn-postgres
    'o4wwck0g0c04wgoo4g4s0004', // gitea
]);
/** Throws when `repo` is a protected platform repo; otherwise a no-op. */
function assertGiteaWritable(repo) {
    if (!PROTECTED_GITEA_REPOS.has(repo))
        return;
    throw new Error(`SECURITY: Repo "${repo}" is a protected Vibn platform repo. Agents cannot push code or modify issues in this repository.`);
}
/** Throws when `appUuid` is a protected platform app; otherwise a no-op. */
function assertCoolifyDeployable(appUuid) {
    if (!PROTECTED_COOLIFY_APPS.has(appUuid))
        return;
    throw new Error(`SECURITY: App "${appUuid}" is a protected Vibn platform application. Agents cannot trigger deployments for this application.`);
}
exports.ALL_TOOLS = [
{
name: 'read_file',
@@ -296,6 +335,23 @@ exports.ALL_TOOLS = [
},
required: ['app_name']
}
},
{
name: 'save_memory',
description: 'Persist an important fact about this project to long-term memory. Use this to save decisions, tech stack choices, feature descriptions, constraints, or goals so they are remembered across conversations.',
parameters: {
type: 'object',
properties: {
key: { type: 'string', description: 'Short unique label (e.g. "primary_language", "auth_strategy", "deploy_target")' },
type: {
type: 'string',
enum: ['tech_stack', 'decision', 'feature', 'goal', 'constraint', 'note'],
description: 'Category of the memory item'
},
value: { type: 'string', description: 'The fact to remember (1-3 sentences)' }
},
required: ['key', 'type', 'value']
}
}
];
// ---------------------------------------------------------------------------
@@ -452,6 +508,21 @@ async function gitCommitAndPush(message, ctx) {
const cwd = ctx.workspaceRoot;
const { apiUrl, apiToken, username } = ctx.gitea;
try {
// Check the remote URL before committing — block pushes to protected repos
let remoteCheck = '';
try {
remoteCheck = (await execAsync('git remote get-url origin', { cwd })).stdout.trim();
}
catch { /* ok */ }
for (const protectedRepo of PROTECTED_GITEA_REPOS) {
const repoPath = protectedRepo.replace('mark/', '');
if (remoteCheck.includes(`/${repoPath}`) || remoteCheck.includes(`/${repoPath}.git`)) {
return {
error: `SECURITY: This workspace is linked to a protected Vibn platform repo (${protectedRepo}). ` +
`Agents cannot push to platform repos. Only user project repos are writable.`
};
}
}
await execAsync('git add -A', { cwd });
await execAsync(`git commit -m "${message.replace(/"/g, '\\"')}"`, { cwd });
// Get current remote URL, strip any existing credentials, re-inject cleanly
@@ -493,7 +564,11 @@ async function coolifyFetch(path, ctx, method = 'GET', body) {
return res.json();
}
/**
 * List Coolify projects visible to agents.
 * Fix: removed the stale leftover line `return coolifyFetch('/projects', ctx);`
 * (the deleted side of the diff) which made the filtering below unreachable.
 */
async function coolifyListProjects(ctx) {
    const projects = await coolifyFetch('/projects', ctx);
    // A non-array payload is an error object from the API — pass it through.
    if (!Array.isArray(projects))
        return projects;
    // Filter out the protected VIBN project entirely — agents don't need to see it.
    return projects.filter((p) => p.uuid !== PROTECTED_COOLIFY_PROJECT);
}
async function coolifyListApplications(projectUuid, ctx) {
const all = await coolifyFetch('/applications', ctx);
@@ -502,6 +577,15 @@ async function coolifyListApplications(projectUuid, ctx) {
return all.filter((a) => a.project_uuid === projectUuid);
}
/**
 * Trigger a Coolify deployment for `appUuid`, refusing protected platform apps.
 * Throws via assertCoolifyDeployable for explicitly protected UUIDs; returns an
 * error object when the app belongs to the protected platform project.
 */
async function coolifyDeploy(appUuid, ctx) {
    // Hard block on explicitly protected app UUIDs (throws on match).
    assertCoolifyDeployable(appUuid);
    // Belt-and-suspenders: also refuse if the app sits in the protected project.
    const allApps = await coolifyFetch('/applications', ctx);
    const match = Array.isArray(allApps)
        ? allApps.find((a) => a.uuid === appUuid)
        : undefined;
    if (match?.project_uuid === PROTECTED_COOLIFY_PROJECT) {
        return { error: `SECURITY: App "${appUuid}" belongs to the protected Vibn project. Agents cannot deploy platform apps.` };
    }
    return coolifyFetch(`/applications/${appUuid}/deploy`, ctx, 'POST');
}
async function coolifyGetLogs(appUuid, ctx) {
@@ -525,12 +609,14 @@ async function giteaFetch(path, ctx, method = 'GET', body) {
return res.json();
}
/** Create an issue on `repo`; mutation is blocked on protected platform repos. */
async function giteaCreateIssue(repo, title, body, labels, ctx) {
    assertGiteaWritable(repo); // throws for protected repos
    const payload = { title, body, labels };
    return giteaFetch(`/repos/${repo}/issues`, ctx, 'POST', payload);
}
/** List up to 20 issues on `repo` filtered by `state` (read-only, no guard). */
async function giteaListIssues(repo, state, ctx) {
    const path = `/repos/${repo}/issues?state=${state}&limit=20`;
    return giteaFetch(path, ctx);
}
/** Close issue `issueNumber` on `repo`; mutation is blocked on protected repos. */
async function giteaCloseIssue(repo, issueNumber, ctx) {
    assertGiteaWritable(repo); // throws for protected repos
    const patch = { state: 'closed' };
    return giteaFetch(`/repos/${repo}/issues/${issueNumber}`, ctx, 'PATCH', patch);
}
// ---------------------------------------------------------------------------
@@ -560,7 +646,10 @@ async function listRepos(ctx) {
headers: { 'Authorization': `token ${ctx.gitea.apiToken}` }
});
const data = await res.json();
return (data.data || []).map((r) => ({
return (data.data || [])
// Hide protected platform repos from agent's view entirely
.filter((r) => !PROTECTED_GITEA_REPOS.has(r.full_name))
.map((r) => ({
name: r.full_name,
description: r.description,
default_branch: r.default_branch,
@@ -571,9 +660,12 @@ async function listRepos(ctx) {
}
async function listAllIssues(repo, state, ctx) {
if (repo) {
if (PROTECTED_GITEA_REPOS.has(repo)) {
return { error: `SECURITY: "${repo}" is a protected Vibn platform repo. Agents cannot access its issues.` };
}
return giteaFetch(`/repos/${repo}/issues?state=${state}&limit=20`, ctx);
}
// Fetch across all repos
// Fetch across all non-protected repos
const repos = await listRepos(ctx);
const allIssues = [];
for (const r of repos.slice(0, 10)) {
@@ -595,7 +687,10 @@ async function listAllApps(ctx) {
const apps = await coolifyFetch('/applications', ctx);
if (!Array.isArray(apps))
return apps;
return apps.map((a) => ({
return apps
// Filter out apps that belong to the protected VIBN project
.filter((a) => a.project_uuid !== PROTECTED_COOLIFY_PROJECT && !PROTECTED_COOLIFY_APPS.has(a.uuid))
.map((a) => ({
uuid: a.uuid,
name: a.name,
fqdn: a.fqdn,
@@ -611,6 +706,9 @@ async function getAppStatus(appName, ctx) {
const app = apps.find((a) => a.name?.toLowerCase() === appName.toLowerCase() || a.uuid === appName);
if (!app)
return { error: `App "${appName}" not found` };
if (PROTECTED_COOLIFY_APPS.has(app.uuid) || app.project_uuid === PROTECTED_COOLIFY_PROJECT) {
return { error: `SECURITY: "${appName}" is a protected Vibn platform app. Status is not exposed to agents.` };
}
const logs = await coolifyFetch(`/applications/${app.uuid}/logs?limit=20`, ctx);
return { name: app.name, uuid: app.uuid, status: app.status, fqdn: app.fqdn, logs };
}
@@ -648,6 +746,10 @@ async function getJobStatus(jobId) {
return { error: `Failed to get job: ${err instanceof Error ? err.message : String(err)}` };
}
}
function saveMemory(key, type, value, ctx) {
ctx.memoryUpdates.push({ key, type, value });
return { saved: true, key, type };
}
async function deployApp(appName, ctx) {
const apps = await coolifyFetch('/applications', ctx);
if (!Array.isArray(apps))
@@ -655,6 +757,13 @@ async function deployApp(appName, ctx) {
const app = apps.find((a) => a.name?.toLowerCase() === appName.toLowerCase() || a.uuid === appName);
if (!app)
return { error: `App "${appName}" not found` };
// Block deployment to protected VIBN platform apps
if (PROTECTED_COOLIFY_APPS.has(app.uuid) || app.project_uuid === PROTECTED_COOLIFY_PROJECT) {
return {
error: `SECURITY: "${appName}" is a protected Vibn platform application. ` +
`Agents can only deploy user project apps, not platform infrastructure.`
};
}
const result = await fetch(`${ctx.coolify.apiUrl}/api/v1/deploy?uuid=${app.uuid}&force=false`, {
headers: { 'Authorization': `Bearer ${ctx.coolify.apiToken}` }
});