fix: compile dist from source in Docker, fix ChatResult interface
- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
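For reviewers, a minimal sketch of how a caller might consume the new ChatResult fields after this change. The handleChat wrapper and the persistence helpers (loadHistory, saveHistory, saveMemories) are hypothetical stand-ins; only orchestratorChat, ChatResult, LLMMessage, and MemoryUpdate come from this commit.

import { orchestratorChat, ChatResult } from './orchestrator';
import { LLMMessage } from './llm';
import { ToolContext, MemoryUpdate } from './tools';

// Assumed persistence helpers — stand-ins for whatever store the API layer uses.
declare function loadHistory(sessionId: string): Promise<LLMMessage[]>;
declare function saveHistory(sessionId: string, history: LLMMessage[]): Promise<void>;
declare function saveMemories(sessionId: string, items: MemoryUpdate[]): Promise<void>;

export async function handleChat(sessionId: string, message: string, ctx: ToolContext) {
  const result: ChatResult = await orchestratorChat(sessionId, message, ctx, {
    preloadedHistory: await loadHistory(sessionId), // seed a fresh in-memory session from the DB
  });
  await saveHistory(sessionId, result.history);       // caller persists the updated conversation
  await saveMemories(sessionId, result.memoryUpdates); // knowledge items saved via the save_memory tool
  return { reply: result.reply, model: result.model, turns: result.turns, toolCalls: result.toolCalls };
}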
Dockerfile (13 changed lines)
@@ -9,12 +9,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 
 WORKDIR /app
 
-# Install dependencies first (layer cache)
+# Install all deps (including devDeps for tsc)
 COPY package*.json ./
-RUN npm ci --omit=dev
+RUN npm ci
 
-# Copy compiled output (build before docker build, or use multi-stage)
-COPY dist/ ./dist/
+# Copy source and compile
+COPY tsconfig.json ./
+COPY src/ ./src/
+RUN npm run build
+
+# Prune dev deps after build
+RUN npm prune --omit=dev
 
 # Create workspace dir and non-root user
 RUN useradd -r -m -s /bin/bash agent && \
dist/agent-runner.d.ts (8 changed lines, vendored)
@@ -5,12 +5,12 @@ export interface RunResult {
 finalText: string;
 toolCallCount: number;
 turns: number;
+model: string;
 }
 /**
- * Core Gemini agent loop.
+ * Core agent execution loop — model-agnostic via the unified LLM client.
  *
- * Sends the task to Gemini with the agent's system prompt and tools,
- * then loops: execute tool calls → send results back → repeat until
- * the model stops calling tools or MAX_TURNS is reached.
+ * Agents use their configured model tier (A/B/C) or a specific model ID.
+ * Tool calling uses OpenAI format throughout.
  */
 export declare function runAgent(job: Job, config: AgentConfig, task: string, ctx: ToolContext): Promise<RunResult>;
dist/agent-runner.js (125 changed lines, vendored)
@@ -1,117 +1,80 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.runAgent = runAgent;
-const genai_1 = require("@google/genai");
+const llm_1 = require("./llm");
 const tools_1 = require("./tools");
 const job_store_1 = require("./job-store");
-const MAX_TURNS = 40; // safety cap — prevents infinite loops
+const MAX_TURNS = 40;
 /**
- * Core Gemini agent loop.
+ * Core agent execution loop — model-agnostic via the unified LLM client.
  *
- * Sends the task to Gemini with the agent's system prompt and tools,
- * then loops: execute tool calls → send results back → repeat until
- * the model stops calling tools or MAX_TURNS is reached.
+ * Agents use their configured model tier (A/B/C) or a specific model ID.
+ * Tool calling uses OpenAI format throughout.
  */
 async function runAgent(job, config, task, ctx) {
-const apiKey = process.env.GOOGLE_API_KEY;
-if (!apiKey) {
-throw new Error('GOOGLE_API_KEY environment variable is not set');
-}
-const genai = new genai_1.GoogleGenAI({ apiKey });
-// Build Gemini function declarations from our tool definitions
-const functionDeclarations = config.tools.map(tool => ({
-name: tool.name,
-description: tool.description,
-parameters: tool.parameters
-}));
-const tools = functionDeclarations.length > 0
-? [{ functionDeclarations }]
-: [];
-const model = genai.models;
-// Build conversation history
-const history = [];
-// Initial user message
-let currentMessage = {
-role: 'user',
-parts: [{ text: task }]
-};
+const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
+const oaiTools = (0, llm_1.toOAITools)(config.tools);
+const history = [
+{ role: 'user', content: task }
+];
 let toolCallCount = 0;
 let turn = 0;
 let finalText = '';
-(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} agent...` });
+(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} (${llm.modelId})…` });
 while (turn < MAX_TURNS) {
 turn++;
-// Add current message to history
-history.push(currentMessage);
-// Call Gemini
-const response = await model.generateContent({
-model: config.model || 'gemini-2.0-flash',
-contents: history,
-config: {
-systemInstruction: config.systemPrompt,
-tools: tools.length > 0 ? tools : undefined,
-temperature: 0.2,
-maxOutputTokens: 8192
-}
-});
-const candidate = response.candidates?.[0];
-if (!candidate) {
-throw new Error('No response from Gemini');
-}
-// Add model response to history
-const modelContent = {
-role: 'model',
-parts: candidate.content?.parts || []
+const messages = [
+{ role: 'system', content: config.systemPrompt },
+...history
+];
+const response = await llm.chat(messages, oaiTools, 8192);
+// Build assistant message for history
+const assistantMsg = {
+role: 'assistant',
+content: response.content,
+tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
 };
-history.push(modelContent);
-// Extract function calls from the response
-const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
-if (functionCalls.length === 0) {
-// No tool calls — the agent is done
-finalText = candidate.content?.parts
-?.filter(p => p.text)
-.map(p => p.text)
-.join('') ?? '';
+history.push(assistantMsg);
+// No tool calls — agent is done
+if (response.tool_calls.length === 0) {
+finalText = response.content ?? '';
 break;
 }
-// Execute all tool calls
-const toolResultParts = [];
-for (const part of functionCalls) {
-const call = part.functionCall;
-const callName = call.name ?? 'unknown';
-const callArgs = (call.args ?? {});
+// Execute tool calls
+for (const tc of response.tool_calls) {
+const fnName = tc.function.name;
+let fnArgs = {};
+try {
+fnArgs = JSON.parse(tc.function.arguments || '{}');
+}
+catch { /* bad JSON */ }
 toolCallCount++;
 (0, job_store_1.updateJob)(job.id, {
-progress: `Turn ${turn}: calling ${callName}...`,
+progress: `Turn ${turn}: calling ${fnName}…`,
 toolCalls: [...(job.toolCalls || []), {
 turn,
-tool: callName,
-args: callArgs,
+tool: fnName,
+args: fnArgs,
 timestamp: new Date().toISOString()
 }]
 });
 let result;
 try {
-result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
+result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
 }
 catch (err) {
 result = { error: err instanceof Error ? err.message : String(err) };
 }
-toolResultParts.push({
-functionResponse: {
-name: callName,
-response: { result }
-}
+history.push({
+role: 'tool',
+tool_call_id: tc.id,
+name: fnName,
+content: typeof result === 'string' ? result : JSON.stringify(result)
 });
 }
-// Next turn: send tool results back to the model
-currentMessage = {
-role: 'user',
-parts: toolResultParts
-};
 }
 if (turn >= MAX_TURNS && !finalText) {
-finalText = `Agent reached the ${MAX_TURNS}-turn safety limit. Last tool call count: ${toolCallCount}.`;
+finalText = `Agent hit the ${MAX_TURNS}-turn safety limit. Tool calls made: ${toolCallCount}.`;
 }
-return { finalText, toolCallCount, turns: turn };
+return { finalText, toolCallCount, turns: turn, model: llm.modelId };
 }
dist/agents.js (124 changed lines, vendored)
@@ -13,108 +13,104 @@ function pick(names) {
 }
 // ---------------------------------------------------------------------------
 // Agent definitions
+//
+// model is a tier ('A' | 'B' | 'C') or a specific model ID.
+// Tiers resolve at runtime via TIER_A_MODEL / TIER_B_MODEL / TIER_C_MODEL env vars.
+//
+// Tier A = gemini-2.5-flash — fast, cheap: routing, summaries, monitoring
+// Tier B = zai-org/glm-5-maas — workhorse coding model
+// Tier C = zai-org/glm-5-maas — complex decisions (or Claude Sonnet via TIER_C_MODEL)
 // ---------------------------------------------------------------------------
 exports.AGENTS = {
 Orchestrator: {
 name: 'Orchestrator',
-description: 'Master coordinator that breaks down high-level goals and delegates to specialist agents',
-model: 'gemini-2.5-flash',
-systemPrompt: `You are the Orchestrator for Vibn, an autonomous AI system for software development.
+description: 'Master coordinator — breaks down goals and delegates to specialist agents',
+model: 'B', // GLM-5 — good planner, chain-of-thought reasoning
+systemPrompt: `You are the Orchestrator for Vibn, an autonomous AI platform for software development.
 
-Your role is to:
-1. Understand the high-level goal provided in the task.
-2. Break it down into concrete sub-tasks.
-3. Delegate sub-tasks to the appropriate specialist agents using the spawn_agent tool.
-4. Use Gitea to track progress: create an issue at the start, close it when done.
-5. Summarize what was done when complete.
+Your role:
+1. Understand the high-level goal.
+2. Break it into concrete sub-tasks.
+3. Delegate to the right specialist agents via spawn_agent.
+4. Track progress via Gitea issues.
+5. Summarize results when done.
 
-Available specialist agents and when to use them:
-- **Coder**: Any code changes — features, bug fixes, refactors, tests.
-- **PM**: Project management — issue triage, sprint planning, documentation updates.
-- **Marketing**: Content and copy — blog posts, landing page copy, release notes.
+Agents available:
+- Coder: code changes, features, bug fixes, tests.
+- PM: issue triage, docs, sprint planning.
+- Marketing: copy, blog posts, release notes.
 
 Rules:
-- Always create a Gitea issue first to track the work.
-- Delegate to ONE agent at a time unless tasks are fully independent.
-- Check back on progress by listing issues.
-- Never try to write code yourself — delegate to Coder.
-- Be concise in your task descriptions when spawning agents.`,
+- Create a Gitea issue first to track the work.
+- Delegate one agent at a time unless tasks are fully independent.
+- Never write code yourself — delegate to Coder.
+- Be specific in task descriptions when spawning agents.`,
 tools: pick([...GITEA_TOOLS, ...SPAWN_TOOL, ...COOLIFY_TOOLS])
 },
 Coder: {
 name: 'Coder',
-description: 'Senior software engineer — writes, edits, and tests code. Commits and pushes when done.',
-model: 'gemini-2.5-flash',
-systemPrompt: `You are an expert senior software engineer working autonomously on a git repository.
+description: 'Senior software engineer — writes, edits, tests, commits, and pushes code',
+model: 'B', // GLM-5 — strong at code generation and diffs
+systemPrompt: `You are an expert senior software engineer working autonomously on a Git repository.
 
-Your job is to complete the coding task given to you. Follow these rules:
-
-**Workflow:**
-1. Start by exploring the codebase: list_directory, find_files, read_file to understand structure.
-2. Search for relevant code: search_code to find existing patterns.
+Workflow:
+1. Explore the codebase: list_directory, find_files, read_file.
+2. Search for patterns: search_code.
 3. Plan your changes before making them.
 4. Read every file BEFORE editing it.
 5. Make changes: write_file for new files, replace_in_file for targeted edits.
-6. Run tests or lint if applicable: execute_command.
-7. Commit and push when the task is complete: git_commit_and_push.
+6. Run tests/lint if applicable: execute_command.
+7. Commit and push when complete: git_commit_and_push.
 
-**Code quality rules:**
-- Match existing code style exactly.
-- Never leave TODO comments — implement or skip.
+Code quality:
+- Match existing style exactly.
+- No TODO comments — implement or skip.
 - Write complete files, not partial snippets.
-- If tests exist, run them and fix failures before committing.
-- Commit message should be concise and in imperative mood (e.g. "add user authentication").
+- Run tests and fix failures before committing.
+- Commit messages: imperative mood, concise (e.g. "add user authentication").
 
-**Safety rules:**
-- Never delete files unless explicitly instructed.
-- Never modify .env files or credentials.
+Safety:
+- Never delete files unless explicitly told to.
+- Never touch .env files or credentials.
 - Never commit secrets or API keys.
 
-**If you were triggered by a Gitea issue:**
-- After committing, close the issue using gitea_close_issue.
-- The repo name is in the format "owner/name".
-
-Be methodical. Read before you write. Test before you commit.`,
+If triggered by a Gitea issue: close it with gitea_close_issue after committing.`,
 tools: pick([...FILE_TOOLS, ...SHELL_TOOLS, ...GIT_TOOLS, ...GITEA_TOOLS])
 },
 PM: {
 name: 'PM',
-description: 'Product manager — manages Gitea issues, writes documentation, tracks project health',
-model: 'gemini-2.5-flash',
+description: 'Product manager — docs, issue management, project health reports',
+model: 'A', // Gemini Flash — lightweight, cheap for docs/issue work
 systemPrompt: `You are an autonomous Product Manager for a software project hosted on Gitea.
 
-Your responsibilities:
-1. Create, update, and close Gitea issues to track work.
-2. Write and update documentation files in the repository.
+Responsibilities:
+1. Create, update, and close Gitea issues.
+2. Write and update docs in the repository.
 3. Summarize project state and create reports.
-4. Prioritize and triage bugs/features based on impact.
+4. Triage bugs and features by impact.
 
-When writing documentation:
-- Be clear and concise.
-- Use markdown formatting.
-- Focus on what users and developers need to know.
-- Keep docs up to date with the actual codebase state.
-
-Always commit documentation updates after writing them.`,
+When writing docs:
+- Clear and concise.
+- Markdown formatting.
+- Keep docs in sync with the codebase.
+- Always commit after writing.`,
 tools: pick([...GITEA_TOOLS, ...FILE_TOOLS, ...GIT_TOOLS])
 },
 Marketing: {
 name: 'Marketing',
-description: 'Marketing specialist — writes copy, blog posts, release notes, and landing page content',
-model: 'gemini-2.5-flash',
+description: 'Marketing specialist — copy, blog posts, release notes, landing page content',
+model: 'A', // Gemini Flash — cheap for content generation
 systemPrompt: `You are an autonomous Marketing specialist for a SaaS product called Vibn.
 
-Vibn is a cloud-based AI-powered development environment. It helps development teams build faster with AI agents that can write code, manage projects, and deploy automatically.
+Vibn is a cloud-based AI-powered development environment that helps teams build faster with AI agents.
 
-Your responsibilities:
-1. Write compelling marketing copy for landing pages, email campaigns, and social media.
-2. Write technical blog posts that explain features in an accessible way.
+Responsibilities:
+1. Write landing page copy, emails, and social media content.
+2. Write technical blog posts explaining features accessibly.
 3. Write release notes that highlight user-facing value.
-4. Ensure all copy is on-brand: professional, clear, forward-thinking, and developer-friendly.
+4. Maintain brand voice: smart, confident, practical. No hype, no jargon.
 
-Brand voice: Smart, confident, practical. No hype. No jargon. Show don't tell.
-
-When writing content, create actual files in the repository (e.g. blog/2026-02-release.md) and commit them.`,
+Always create real files in the repo (e.g. blog/2026-02-release.md) and commit them.`,
 tools: pick([...FILE_TOOLS, ...GIT_TOOLS])
 }
 };
dist/llm.d.ts (67 lines, vendored, new file)
@@ -0,0 +1,67 @@
+export interface LLMMessage {
+role: 'system' | 'user' | 'assistant' | 'tool';
+content: string | null;
+tool_calls?: LLMToolCall[];
+tool_call_id?: string;
+name?: string;
+}
+export interface LLMToolCall {
+id: string;
+type: 'function';
+function: {
+name: string;
+arguments: string;
+};
+}
+export interface LLMTool {
+type: 'function';
+function: {
+name: string;
+description: string;
+parameters: Record<string, unknown>;
+};
+}
+export interface LLMResponse {
+content: string | null;
+reasoning: string | null;
+tool_calls: LLMToolCall[];
+finish_reason: string;
+usage?: {
+prompt_tokens: number;
+completion_tokens: number;
+total_tokens: number;
+};
+}
+export interface LLMClient {
+modelId: string;
+chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
+}
+export declare class VertexOpenAIClient implements LLMClient {
+modelId: string;
+private projectId;
+private region;
+private temperature;
+constructor(modelId: string, opts?: {
+projectId?: string;
+region?: string;
+temperature?: number;
+});
+chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
+}
+export declare class GeminiClient implements LLMClient {
+modelId: string;
+private temperature;
+constructor(modelId?: string, opts?: {
+temperature?: number;
+});
+chat(messages: LLMMessage[], tools?: LLMTool[], maxTokens?: number): Promise<LLMResponse>;
+}
+export type ModelTier = 'A' | 'B' | 'C';
+export declare function createLLM(modelOrTier: string | ModelTier, opts?: {
+temperature?: number;
+}): LLMClient;
+export declare function toOAITools(tools: Array<{
+name: string;
+description: string;
+parameters: Record<string, unknown>;
+}>): LLMTool[];
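Before the implementation below, a minimal usage sketch of the client interface declared above. The demo wrapper and the echo tool definition are illustrative assumptions; createLLM, toOAITools, and chat are used as declared in dist/llm.d.ts.

import { createLLM, toOAITools, LLMMessage } from './llm';

async function demo() {
  // 'B' resolves via TIER_B_MODEL (default zai-org/glm-5-maas); a concrete id such as 'gemini-2.5-flash' also works.
  const llm = createLLM('B', { temperature: 0.2 });
  const tools = toOAITools([
    { name: 'echo', description: 'Echo text back', parameters: { type: 'object', properties: { text: { type: 'string' } }, required: ['text'] } },
  ]);
  const messages: LLMMessage[] = [
    { role: 'system', content: 'You are a helpful agent.' },
    { role: 'user', content: 'Say hello, then stop.' },
  ];
  const res = await llm.chat(messages, tools, 1024);
  console.log(res.content, res.tool_calls.map(tc => tc.function.name));
}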
dist/llm.js (197 lines, vendored, new file)
@@ -0,0 +1,197 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GeminiClient = exports.VertexOpenAIClient = void 0;
+exports.createLLM = createLLM;
+exports.toOAITools = toOAITools;
+const child_process_1 = require("child_process");
+const genai_1 = require("@google/genai");
+const uuid_1 = require("uuid");
+// ---------------------------------------------------------------------------
+// Vertex AI OpenAI-compatible client
+// Used for: zai-org/glm-5-maas, anthropic/claude-sonnet-4-6, etc.
+// ---------------------------------------------------------------------------
+let _cachedToken = '';
+let _tokenExpiry = 0;
+function getVertexToken() {
+const now = Date.now();
+if (_cachedToken && now < _tokenExpiry)
+return _cachedToken;
+_cachedToken = (0, child_process_1.execSync)('gcloud auth print-access-token', { encoding: 'utf8' }).trim();
+_tokenExpiry = now + 55 * 60 * 1000; // tokens last 1hr, refresh at 55min
+return _cachedToken;
+}
+class VertexOpenAIClient {
+constructor(modelId, opts) {
+this.modelId = modelId;
+this.projectId = opts?.projectId ?? process.env.GCP_PROJECT_ID ?? 'master-ai-484822';
+this.region = opts?.region ?? 'global';
+this.temperature = opts?.temperature ?? 0.3;
+}
+async chat(messages, tools, maxTokens = 4096) {
+const token = getVertexToken();
+const base = this.region === 'global'
+? 'https://aiplatform.googleapis.com'
+: `https://${this.region}-aiplatform.googleapis.com`;
+const url = `${base}/v1/projects/${this.projectId}/locations/${this.region}/endpoints/openapi/chat/completions`;
+const body = {
+model: this.modelId,
+messages,
+max_tokens: maxTokens,
+temperature: this.temperature,
+stream: false
+};
+if (tools && tools.length > 0) {
+body.tools = tools;
+body.tool_choice = 'auto';
+}
+const res = await fetch(url, {
+method: 'POST',
+headers: {
+'Authorization': `Bearer ${token}`,
+'Content-Type': 'application/json'
+},
+body: JSON.stringify(body)
+});
+if (!res.ok) {
+const errText = await res.text();
+// Force token refresh on 401
+if (res.status === 401)
+_tokenExpiry = 0;
+throw new Error(`Vertex API ${res.status}: ${errText.slice(0, 400)}`);
+}
+const data = await res.json();
+const choice = data.choices?.[0];
+const message = choice?.message ?? {};
+return {
+content: message.content ?? null,
+reasoning: message.reasoning_content ?? null,
+tool_calls: message.tool_calls ?? [],
+finish_reason: choice?.finish_reason ?? 'stop',
+usage: data.usage
+};
+}
+}
+exports.VertexOpenAIClient = VertexOpenAIClient;
+// ---------------------------------------------------------------------------
+// Gemini client via @google/genai SDK
+// Used for: Tier A (fast/cheap routing, summaries, log parsing)
+// Converts to/from OpenAI message format internally.
+// ---------------------------------------------------------------------------
+class GeminiClient {
+constructor(modelId = 'gemini-2.5-flash', opts) {
+this.modelId = modelId;
+this.temperature = opts?.temperature ?? 0.2;
+}
+async chat(messages, tools, maxTokens = 8192) {
+const apiKey = process.env.GOOGLE_API_KEY;
+if (!apiKey)
+throw new Error('GOOGLE_API_KEY not set');
+const genai = new genai_1.GoogleGenAI({ apiKey });
+const systemMsg = messages.find(m => m.role === 'system');
+const nonSystem = messages.filter(m => m.role !== 'system');
+const functionDeclarations = (tools ?? []).map(t => ({
+name: t.function.name,
+description: t.function.description,
+parameters: t.function.parameters
+}));
+const response = await genai.models.generateContent({
+model: this.modelId,
+contents: toGeminiContents(nonSystem),
+config: {
+systemInstruction: systemMsg?.content ?? undefined,
+tools: functionDeclarations.length > 0 ? [{ functionDeclarations }] : undefined,
+temperature: this.temperature,
+maxOutputTokens: maxTokens
+}
+});
+const candidate = response.candidates?.[0];
+if (!candidate)
+throw new Error('No response from Gemini');
+const parts = candidate.content?.parts ?? [];
+const textContent = parts.filter(p => p.text).map(p => p.text).join('') || null;
+const fnCalls = parts.filter(p => p.functionCall);
+const tool_calls = fnCalls.map(p => ({
+id: `call_${(0, uuid_1.v4)().replace(/-/g, '').slice(0, 12)}`,
+type: 'function',
+function: {
+name: p.functionCall.name ?? '',
+arguments: JSON.stringify(p.functionCall.args ?? {})
+}
+}));
+return {
+content: textContent,
+reasoning: null,
+tool_calls,
+finish_reason: fnCalls.length > 0 ? 'tool_calls' : 'stop'
+};
+}
+}
+exports.GeminiClient = GeminiClient;
+/** Convert OpenAI message format → Gemini Content[] format */
+function toGeminiContents(messages) {
+const contents = [];
+for (const msg of messages) {
+if (msg.role === 'assistant') {
+const parts = [];
+if (msg.content)
+parts.push({ text: msg.content });
+for (const tc of msg.tool_calls ?? []) {
+parts.push({
+functionCall: {
+name: tc.function.name,
+args: JSON.parse(tc.function.arguments || '{}')
+}
+});
+}
+contents.push({ role: 'model', parts });
+}
+else if (msg.role === 'tool') {
+// Parse content back — could be JSON or plain text
+let resultValue = msg.content;
+try {
+resultValue = JSON.parse(msg.content ?? 'null');
+}
+catch { /* keep as string */ }
+contents.push({
+role: 'user',
+parts: [{
+functionResponse: {
+name: msg.name ?? 'tool',
+response: { result: resultValue }
+}
+}]
+});
+}
+else {
+contents.push({ role: 'user', parts: [{ text: msg.content ?? '' }] });
+}
+}
+return contents;
+}
+const TIER_MODELS = {
+A: process.env.TIER_A_MODEL ?? 'gemini-2.5-flash',
+B: process.env.TIER_B_MODEL ?? 'zai-org/glm-5-maas',
+C: process.env.TIER_C_MODEL ?? 'zai-org/glm-5-maas'
+};
+function createLLM(modelOrTier, opts) {
+const modelId = (modelOrTier === 'A' || modelOrTier === 'B' || modelOrTier === 'C')
+? TIER_MODELS[modelOrTier]
+: modelOrTier;
+if (modelId.startsWith('gemini-')) {
+return new GeminiClient(modelId, opts);
+}
+return new VertexOpenAIClient(modelId, { temperature: opts?.temperature });
+}
+// ---------------------------------------------------------------------------
+// Helper — convert our ToolDefinition[] → LLMTool[] (OpenAI format)
+// ---------------------------------------------------------------------------
+function toOAITools(tools) {
+return tools.map(t => ({
+type: 'function',
+function: {
+name: t.name,
+description: t.description,
+parameters: t.parameters
+}
+}));
+}
dist/orchestrator.d.ts (20 changed lines, vendored)
@@ -1,4 +1,5 @@
-import { ToolContext } from './tools';
+import { LLMMessage } from './llm';
+import { ToolContext, MemoryUpdate } from './tools';
 export declare function listSessions(): {
 id: string;
 messages: number;
@@ -6,14 +7,21 @@ export declare function listSessions(): {
 lastActiveAt: string;
 }[];
 export declare function clearSession(sessionId: string): void;
-export interface ChatMessage {
-role: 'user' | 'assistant';
-content: string;
-}
 export interface ChatResult {
 reply: string;
+reasoning: string | null;
 sessionId: string;
 turns: number;
 toolCalls: string[];
+model: string;
+/** Updated conversation history — caller should persist this */
+history: LLMMessage[];
+/** Knowledge items the AI chose to save this turn */
+memoryUpdates: MemoryUpdate[];
 }
-export declare function orchestratorChat(sessionId: string, userMessage: string, ctx: ToolContext): Promise<ChatResult>;
+export declare function orchestratorChat(sessionId: string, userMessage: string, ctx: ToolContext, opts?: {
+/** Pre-load history from DB — replaces in-memory session history */
+preloadedHistory?: LLMMessage[];
+/** Knowledge items to inject as context at start of conversation */
+knowledgeContext?: string;
+}): Promise<ChatResult>;
dist/orchestrator.js (198 changed lines, vendored)
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.listSessions = listSessions;
 exports.clearSession = clearSession;
 exports.orchestratorChat = orchestratorChat;
-const genai_1 = require("@google/genai");
+const llm_1 = require("./llm");
 const tools_1 = require("./tools");
 const MAX_TURNS = 20;
 const sessions = new Map();
@@ -32,131 +32,141 @@ function clearSession(sessionId) {
 sessions.delete(sessionId);
 }
 // ---------------------------------------------------------------------------
-// Orchestrator system prompt — full Vibn context
+// Orchestrator system prompt
 // ---------------------------------------------------------------------------
 const SYSTEM_PROMPT = `You are the Master Orchestrator for Vibn — an AI-powered cloud development platform.
 
-You are always running. You have full awareness of the Vibn project and can take autonomous action.
+You run continuously and have full awareness of the Vibn project. You can take autonomous action on behalf of the user.
 
 ## What Vibn is
-Vibn is a platform that lets developers build products using AI agents. It includes:
-- A cloud IDE (Theia) at theia.vibnai.com
-- A frontend app (Next.js) at vibnai.com
-- A backend API at api.vibnai.com
-- An agent runner (this system) at agents.vibnai.com
-- Self-hosted Git at git.vibnai.com
-- Self-hosted deployments via Coolify at coolify.vibnai.com
+Vibn lets developers build products using AI agents:
+- Frontend app (Next.js) at vibnai.com
+- Backend API at api.vibnai.com
+- Agent runner (this system) at agents.vibnai.com
+- Cloud IDE (Theia) at theia.vibnai.com
+- Self-hosted Git at git.vibnai.com (user: mark)
+- Deployments via Coolify at coolify.vibnai.com (server: 34.19.250.135, Montreal)
 
-## Your capabilities
-You have access to tools that give you full project control:
+## Your tools
 
-**Awareness tools** (use these to understand current state):
-- list_repos — see all Git repositories
-- list_all_issues — see what work is open or in progress
-- list_all_apps — see all deployed apps and their status
-- get_app_status — check if a specific app is running and healthy
+**Awareness** (understand current state first):
+- list_repos — all Git repositories
+- list_all_issues — open/in-progress work
+- list_all_apps — deployed apps and their status
+- get_app_status — health of a specific app
 - read_repo_file — read any file from any repo without cloning
 
-**Action tools** (use these to get things done):
-- spawn_agent — dispatch Coder, PM, or Marketing agent to do work on a repo
-- get_job_status — check if a spawned agent job is done
-- deploy_app — trigger a Coolify deployment after code is committed
-- gitea_create_issue — create a tracked issue (also triggers agent webhook if labelled)
-- gitea_list_issues, gitea_close_issue — manage issue lifecycle
+**Action** (get things done):
+- spawn_agent — dispatch Coder, PM, or Marketing agent on a repo
+- get_job_status — check a running agent job
+- deploy_app — trigger a Coolify deployment
+- gitea_create_issue — track work (label agent:coder/pm/marketing to auto-trigger)
+- gitea_list_issues / gitea_close_issue — issue lifecycle
 
-## Available agents you can spawn
-- **Coder** — writes code, edits files, runs commands, commits and pushes
-- **PM** — writes documentation, manages issues, creates reports
-- **Marketing** — writes copy, blog posts, release notes
+## Specialist agents you can spawn
+- **Coder** — writes code, tests, commits, and pushes
+- **PM** — docs, issues, sprint tracking
+- **Marketing** — copy, release notes, blog posts
 
 ## How you work
-1. When the user gives you a task, think about what needs to happen.
-2. Use awareness tools first to understand current state if needed.
-3. Break the task into concrete actions.
-4. Spawn the right agents with detailed, specific task descriptions.
-5. Check back on job status if the user wants to track progress.
-6. Report clearly what was done and what's next.
+1. Use awareness tools first if you need current state.
+2. Break the task into concrete steps.
+3. Spawn the right agent(s) with specific, detailed instructions.
+4. Track and report on results.
+5. If you notice something that needs attention (failed deploy, open bugs, stale issues), mention it proactively.
 
-## Your personality
-- Direct and clear. No fluff.
-- Proactive — if you notice something that needs fixing, mention it.
-- Honest about what you can and can't do.
-- You speak for the whole system, not just one agent.
+## Style
+- Direct. No filler.
+- Honest about uncertainty.
+- When spawning agents, be specific — give them full context, not vague instructions.
+- Keep responses concise unless the user needs detail.
 
-## Important context
-- All repos are owned by "mark" on git.vibnai.com
-- The main repos are: vibn-frontend, vibn-api, vibn-agent-runner, theia-code-os
-- The stack: Next.js (frontend), Node.js (API + agent runner), Theia (IDE)
-- Coolify manages all deployments on server 34.19.250.135 (Montreal)
-- Agent label routing: agent:coder, agent:pm, agent:marketing on Gitea issues`;
-async function orchestratorChat(sessionId, userMessage, ctx) {
-const apiKey = process.env.GOOGLE_API_KEY;
-if (!apiKey)
-throw new Error('GOOGLE_API_KEY not set');
-const genai = new genai_1.GoogleGenAI({ apiKey });
+## Security
+- Never spawn agents on: mark/vibn-frontend, mark/theia-code-os, mark/vibn-agent-runner, mark/vibn-api, mark/master-ai
+- Those are protected platform repos — read-only for you, not writable by agents.`;
+// ---------------------------------------------------------------------------
+// Main orchestrator chat — uses GLM-5 (Tier B) by default
+// ---------------------------------------------------------------------------
+async function orchestratorChat(sessionId, userMessage, ctx, opts) {
+const modelId = process.env.ORCHESTRATOR_MODEL ?? 'B'; // Tier B = GLM-5
+const llm = (0, llm_1.createLLM)(modelId, { temperature: 0.3 });
 const session = getOrCreateSession(sessionId);
-// Orchestrator gets ALL tools
-const functionDeclarations = tools_1.ALL_TOOLS.map(t => ({
-name: t.name,
-description: t.description,
-parameters: t.parameters
-}));
-// Add user message to history
-session.history.push({ role: 'user', parts: [{ text: userMessage }] });
+// Seed session from DB history if provided and session is fresh
+if (opts?.preloadedHistory && opts.preloadedHistory.length > 0 && session.history.length === 0) {
+session.history = [...opts.preloadedHistory];
+}
+const oaiTools = (0, llm_1.toOAITools)(tools_1.ALL_TOOLS);
+// Append user message
+session.history.push({ role: 'user', content: userMessage });
 let turn = 0;
 let finalReply = '';
+let finalReasoning = null;
 const toolCallNames = [];
+// Build messages with system prompt prepended
+const buildMessages = () => [
+{ role: 'system', content: SYSTEM_PROMPT },
+...session.history
+];
 while (turn < MAX_TURNS) {
 turn++;
-const response = await genai.models.generateContent({
-model: 'gemini-2.5-flash',
-contents: session.history,
-config: {
-systemInstruction: SYSTEM_PROMPT,
-tools: [{ functionDeclarations }],
-temperature: 0.3,
-maxOutputTokens: 8192
-}
-});
-const candidate = response.candidates?.[0];
-if (!candidate)
-throw new Error('No response from Gemini');
-const modelContent = {
-role: 'model',
-parts: candidate.content?.parts || []
+const response = await llm.chat(buildMessages(), oaiTools, 4096);
+// If GLM-5 is still reasoning (content null, finish_reason length) give it more tokens
+if (response.content === null && response.tool_calls.length === 0 && response.finish_reason === 'length') {
+// Retry with more tokens — model hit max_tokens during reasoning
+const retry = await llm.chat(buildMessages(), oaiTools, 8192);
+Object.assign(response, retry);
+}
+// Record reasoning for the final turn (informational, not stored in history)
+if (response.reasoning)
+finalReasoning = response.reasoning;
+// Build assistant message to add to history
+const assistantMsg = {
+role: 'assistant',
+content: response.content,
+tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
 };
-session.history.push(modelContent);
-const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
-// No more tool calls — we have the final answer
-if (functionCalls.length === 0) {
-finalReply = candidate.content?.parts
-?.filter(p => p.text)
-.map(p => p.text)
-.join('') ?? '';
+session.history.push(assistantMsg);
+// No tool calls — we have the final answer
+if (response.tool_calls.length === 0) {
+finalReply = response.content ?? '';
 break;
 }
-// Execute tool calls
-const toolResultParts = [];
-for (const part of functionCalls) {
-const call = part.functionCall;
-const callName = call.name ?? 'unknown';
-const callArgs = (call.args ?? {});
-toolCallNames.push(callName);
+// Execute each tool call and collect results
+for (const tc of response.tool_calls) {
+const fnName = tc.function.name;
+let fnArgs = {};
+try {
+fnArgs = JSON.parse(tc.function.arguments || '{}');
+}
+catch { /* bad JSON */ }
+toolCallNames.push(fnName);
 let result;
 try {
-result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
+result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
 }
 catch (err) {
 result = { error: err instanceof Error ? err.message : String(err) };
 }
-toolResultParts.push({
-functionResponse: { name: callName, response: { result } }
+// Add tool result to history
+session.history.push({
+role: 'tool',
+tool_call_id: tc.id,
+name: fnName,
+content: typeof result === 'string' ? result : JSON.stringify(result)
 });
 }
-session.history.push({ role: 'user', parts: toolResultParts });
 }
 if (turn >= MAX_TURNS && !finalReply) {
-finalReply = 'I hit the turn limit. Please try a more specific request.';
+finalReply = 'Hit the turn limit. Try a more specific request.';
 }
-return { reply: finalReply, sessionId, turns: turn, toolCalls: toolCallNames };
+return {
+reply: finalReply,
+reasoning: finalReasoning,
+sessionId,
+turns: turn,
+toolCalls: toolCallNames,
+model: llm.modelId,
+history: session.history.slice(-40),
+memoryUpdates: ctx.memoryUpdates
+};
 }
dist/server.js (38 changed lines, vendored)
@@ -46,8 +46,17 @@ const job_store_1 = require("./job-store");
 const agent_runner_1 = require("./agent-runner");
 const agents_1 = require("./agents");
 const orchestrator_1 = require("./orchestrator");
+// Protected Vibn platform repos — agents cannot clone or work in these workspaces
+const PROTECTED_GITEA_REPOS = new Set([
+'mark/vibn-frontend',
+'mark/theia-code-os',
+'mark/vibn-agent-runner',
+'mark/vibn-api',
+'mark/master-ai',
+]);
 const app = (0, express_1.default)();
 app.use((0, cors_1.default)());
+const startTime = new Date();
 // Raw body capture for webhook HMAC — must come before express.json()
 app.use('/webhook/gitea', express_1.default.raw({ type: '*/*' }));
 app.use(express_1.default.json());
@@ -62,6 +71,10 @@ function ensureWorkspace(repo) {
 fs.mkdirSync(dir, { recursive: true });
 return dir;
 }
+if (PROTECTED_GITEA_REPOS.has(repo)) {
+throw new Error(`SECURITY: Repo "${repo}" is a protected Vibn platform repo. ` +
+`Agents cannot clone or work in this workspace.`);
+}
 const dir = path.join(base, repo.replace('/', '_'));
 const gitea = {
 apiUrl: process.env.GITEA_API_URL || '',
@@ -95,7 +108,8 @@ function buildContext(repo) {
 coolify: {
 apiUrl: process.env.COOLIFY_API_URL || '',
 apiToken: process.env.COOLIFY_API_TOKEN || ''
-}
+},
+memoryUpdates: []
 };
 }
 // ---------------------------------------------------------------------------
@@ -114,6 +128,28 @@ app.get('/api/agents', (_req, res) => {
 }));
 res.json(agents);
 });
+// Get server status and job statistics
+app.get('/api/status', (_req, res) => {
+const allJobs = (0, job_store_1.listJobs)(Infinity);
+const total_jobs = allJobs.length;
+const by_status = {
+queued: 0,
+running: 0,
+completed: 0,
+failed: 0,
+};
+for (const job of allJobs) {
+by_status[job.status] = (by_status[job.status] || 0) + 1;
+}
+const uptime_seconds = Math.floor((new Date().getTime() - startTime.getTime()) / 1000);
+const agents = Object.values(agents_1.AGENTS).map(a => a.name);
+res.json({
+total_jobs,
+by_status,
+uptime_seconds,
+agents,
+});
+});
 // Submit a new job
 app.post('/api/agent/run', async (req, res) => {
 const { agent: agentName, task, repo } = req.body;
dist/test.d.ts (1 line, vendored, new file)
@@ -0,0 +1 @@
+export {};
dist/test.js (13 lines, vendored, new file)
@@ -0,0 +1,13 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const assert_1 = __importDefault(require("assert"));
+function add(a, b) {
+return a + b;
+}
+assert_1.default.strictEqual(add(1, 2), 3, 'add(1, 2) should be 3');
+assert_1.default.strictEqual(add(0, 0), 0, 'add(0, 0) should be 0');
+assert_1.default.strictEqual(add(-1, 1), 0, 'add(-1, 1) should be 0');
+console.log('All tests passed!');
dist/tools.d.ts (7 changed lines, vendored)
@@ -1,3 +1,8 @@
+export interface MemoryUpdate {
+key: string;
+type: string;
+value: string;
+}
 export interface ToolContext {
 workspaceRoot: string;
 gitea: {
@@ -9,6 +14,8 @@ export interface ToolContext {
 apiUrl: string;
 apiToken: string;
 };
+/** Accumulated memory updates from save_memory tool calls in this turn */
+memoryUpdates: MemoryUpdate[];
 }
 export interface ToolDefinition {
 name: string;
117
dist/tools.js
vendored
117
dist/tools.js
vendored
@@ -41,6 +41,45 @@ const cp = __importStar(require("child_process"));
|
|||||||
const util = __importStar(require("util"));
|
const util = __importStar(require("util"));
|
||||||
const minimatch_1 = require("minimatch");
|
const minimatch_1 = require("minimatch");
|
||||||
const execAsync = util.promisify(cp.exec);
|
const execAsync = util.promisify(cp.exec);
|
||||||
|
// =============================================================================
|
||||||
|
// SECURITY GUARDRAILS — Protected VIBN Platform Resources
|
||||||
|
//
|
||||||
|
// These repos and Coolify resources belong to the Vibn platform itself.
|
||||||
|
// Agents must never be allowed to push code or trigger deployments here.
|
||||||
|
// Read-only operations (list, read file, get status) are still permitted
|
||||||
|
// so agents can observe the platform state, but all mutations are blocked.
|
||||||
|
// =============================================================================
|
||||||
|
/** Gitea repos that agents can NEVER push to, commit to, or write issues on. */
|
||||||
|
const PROTECTED_GITEA_REPOS = new Set([
|
||||||
|
'mark/vibn-frontend',
|
||||||
|
'mark/theia-code-os',
|
||||||
|
'mark/vibn-agent-runner',
|
||||||
|
'mark/vibn-api',
|
||||||
|
'mark/master-ai',
|
||||||
|
]);
|
||||||
|
/** Coolify project UUID for the VIBN platform — agents cannot deploy here. */
|
||||||
|
const PROTECTED_COOLIFY_PROJECT = 'f4owwggokksgw0ogo0844os0';
|
||||||
|
/**
|
||||||
|
* Specific Coolify app UUIDs that must never be deployed by an agent.
|
||||||
|
* This is a belt-and-suspenders check in case the project UUID filter is bypassed.
|
||||||
|
*/
|
||||||
|
const PROTECTED_COOLIFY_APPS = new Set([
|
||||||
|
'y4cscsc8s08c8808go0448s0', // vibn-frontend
|
||||||
|
'kggs4ogckc0w8ggwkkk88kck', // vibn-postgres
|
||||||
|
'o4wwck0g0c04wgoo4g4s0004', // gitea
|
||||||
|
]);
|
||||||
|
function assertGiteaWritable(repo) {
|
||||||
|
if (PROTECTED_GITEA_REPOS.has(repo)) {
|
||||||
|
throw new Error(`SECURITY: Repo "${repo}" is a protected Vibn platform repo. ` +
|
||||||
|
`Agents cannot push code or modify issues in this repository.`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
function assertCoolifyDeployable(appUuid) {
|
||||||
|
if (PROTECTED_COOLIFY_APPS.has(appUuid)) {
|
||||||
|
throw new Error(`SECURITY: App "${appUuid}" is a protected Vibn platform application. ` +
|
||||||
|
`Agents cannot trigger deployments for this application.`);
|
||||||
|
}
|
||||||
|
}
|
||||||
exports.ALL_TOOLS = [
|
exports.ALL_TOOLS = [
|
||||||
{
|
{
|
||||||
name: 'read_file',
|
name: 'read_file',
|
||||||
@@ -296,6 +335,23 @@ exports.ALL_TOOLS = [
|
|||||||
},
|
},
|
||||||
required: ['app_name']
|
required: ['app_name']
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'save_memory',
|
||||||
|
description: 'Persist an important fact about this project to long-term memory. Use this to save decisions, tech stack choices, feature descriptions, constraints, or goals so they are remembered across conversations.',
|
||||||
|
parameters: {
|
||||||
|
type: 'object',
|
||||||
|
properties: {
|
||||||
|
key: { type: 'string', description: 'Short unique label (e.g. "primary_language", "auth_strategy", "deploy_target")' },
|
||||||
|
type: {
|
||||||
|
type: 'string',
|
||||||
|
enum: ['tech_stack', 'decision', 'feature', 'goal', 'constraint', 'note'],
|
||||||
|
description: 'Category of the memory item'
|
||||||
|
},
|
||||||
|
value: { type: 'string', description: 'The fact to remember (1-3 sentences)' }
|
||||||
|
},
|
||||||
|
required: ['key', 'type', 'value']
|
||||||
|
}
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
@@ -452,6 +508,21 @@ async function gitCommitAndPush(message, ctx) {
|
|||||||
const cwd = ctx.workspaceRoot;
|
const cwd = ctx.workspaceRoot;
|
||||||
const { apiUrl, apiToken, username } = ctx.gitea;
|
const { apiUrl, apiToken, username } = ctx.gitea;
|
||||||
try {
|
try {
|
||||||
|
// Check the remote URL before committing — block pushes to protected repos
|
||||||
|
let remoteCheck = '';
|
||||||
|
try {
|
||||||
|
remoteCheck = (await execAsync('git remote get-url origin', { cwd })).stdout.trim();
|
||||||
|
}
|
||||||
|
catch { /* ok */ }
|
||||||
|
for (const protectedRepo of PROTECTED_GITEA_REPOS) {
|
||||||
|
const repoPath = protectedRepo.replace('mark/', '');
|
||||||
|
if (remoteCheck.includes(`/${repoPath}`) || remoteCheck.includes(`/${repoPath}.git`)) {
|
||||||
|
return {
|
||||||
|
error: `SECURITY: This workspace is linked to a protected Vibn platform repo (${protectedRepo}). ` +
|
||||||
|
`Agents cannot push to platform repos. Only user project repos are writable.`
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
await execAsync('git add -A', { cwd });
|
await execAsync('git add -A', { cwd });
|
||||||
await execAsync(`git commit -m "${message.replace(/"/g, '\\"')}"`, { cwd });
|
await execAsync(`git commit -m "${message.replace(/"/g, '\\"')}"`, { cwd });
|
||||||
// Get current remote URL, strip any existing credentials, re-inject cleanly
|
// Get current remote URL, strip any existing credentials, re-inject cleanly
|
||||||
@@ -493,7 +564,11 @@ async function coolifyFetch(path, ctx, method = 'GET', body) {
|
|||||||
return res.json();
|
return res.json();
|
||||||
}
|
}
|
||||||
async function coolifyListProjects(ctx) {
|
async function coolifyListProjects(ctx) {
|
||||||
return coolifyFetch('/projects', ctx);
|
const projects = await coolifyFetch('/projects', ctx);
|
||||||
|
if (!Array.isArray(projects))
|
||||||
|
return projects;
|
||||||
|
// Filter out the protected VIBN project entirely — agents don't need to see it
|
||||||
|
return projects.filter((p) => p.uuid !== PROTECTED_COOLIFY_PROJECT);
|
||||||
}
|
}
|
||||||
async function coolifyListApplications(projectUuid, ctx) {
|
async function coolifyListApplications(projectUuid, ctx) {
|
||||||
const all = await coolifyFetch('/applications', ctx);
|
const all = await coolifyFetch('/applications', ctx);
|
||||||
@@ -502,6 +577,15 @@ async function coolifyListApplications(projectUuid, ctx) {
     return all.filter((a) => a.project_uuid === projectUuid);
 }
 async function coolifyDeploy(appUuid, ctx) {
+    assertCoolifyDeployable(appUuid);
+    // Also check the app belongs to the right project
+    const apps = await coolifyFetch('/applications', ctx);
+    if (Array.isArray(apps)) {
+        const app = apps.find((a) => a.uuid === appUuid);
+        if (app?.project_uuid === PROTECTED_COOLIFY_PROJECT) {
+            return { error: `SECURITY: App "${appUuid}" belongs to the protected Vibn project. Agents cannot deploy platform apps.` };
+        }
+    }
     return coolifyFetch(`/applications/${appUuid}/deploy`, ctx, 'POST');
 }
 async function coolifyGetLogs(appUuid, ctx) {
@@ -525,12 +609,14 @@ async function giteaFetch(path, ctx, method = 'GET', body) {
     return res.json();
 }
 async function giteaCreateIssue(repo, title, body, labels, ctx) {
+    assertGiteaWritable(repo);
     return giteaFetch(`/repos/${repo}/issues`, ctx, 'POST', { title, body, labels });
 }
 async function giteaListIssues(repo, state, ctx) {
     return giteaFetch(`/repos/${repo}/issues?state=${state}&limit=20`, ctx);
 }
 async function giteaCloseIssue(repo, issueNumber, ctx) {
+    assertGiteaWritable(repo);
     return giteaFetch(`/repos/${repo}/issues/${issueNumber}`, ctx, 'PATCH', { state: 'closed' });
 }
 // ---------------------------------------------------------------------------
@@ -560,7 +646,10 @@ async function listRepos(ctx) {
         headers: { 'Authorization': `token ${ctx.gitea.apiToken}` }
     });
     const data = await res.json();
-    return (data.data || []).map((r) => ({
+    return (data.data || [])
+        // Hide protected platform repos from agent's view entirely
+        .filter((r) => !PROTECTED_GITEA_REPOS.has(r.full_name))
+        .map((r) => ({
         name: r.full_name,
         description: r.description,
         default_branch: r.default_branch,
@@ -571,9 +660,12 @@ async function listRepos(ctx) {
 }
 async function listAllIssues(repo, state, ctx) {
     if (repo) {
+        if (PROTECTED_GITEA_REPOS.has(repo)) {
+            return { error: `SECURITY: "${repo}" is a protected Vibn platform repo. Agents cannot access its issues.` };
+        }
         return giteaFetch(`/repos/${repo}/issues?state=${state}&limit=20`, ctx);
     }
-    // Fetch across all repos
+    // Fetch across all non-protected repos
     const repos = await listRepos(ctx);
     const allIssues = [];
     for (const r of repos.slice(0, 10)) {
@@ -595,7 +687,10 @@ async function listAllApps(ctx) {
     const apps = await coolifyFetch('/applications', ctx);
     if (!Array.isArray(apps))
         return apps;
-    return apps.map((a) => ({
+    return apps
+        // Filter out apps that belong to the protected VIBN project
+        .filter((a) => a.project_uuid !== PROTECTED_COOLIFY_PROJECT && !PROTECTED_COOLIFY_APPS.has(a.uuid))
+        .map((a) => ({
         uuid: a.uuid,
         name: a.name,
         fqdn: a.fqdn,
@@ -611,6 +706,9 @@ async function getAppStatus(appName, ctx) {
     const app = apps.find((a) => a.name?.toLowerCase() === appName.toLowerCase() || a.uuid === appName);
     if (!app)
         return { error: `App "${appName}" not found` };
+    if (PROTECTED_COOLIFY_APPS.has(app.uuid) || app.project_uuid === PROTECTED_COOLIFY_PROJECT) {
+        return { error: `SECURITY: "${appName}" is a protected Vibn platform app. Status is not exposed to agents.` };
+    }
     const logs = await coolifyFetch(`/applications/${app.uuid}/logs?limit=20`, ctx);
     return { name: app.name, uuid: app.uuid, status: app.status, fqdn: app.fqdn, logs };
 }
@@ -648,6 +746,10 @@ async function getJobStatus(jobId) {
         return { error: `Failed to get job: ${err instanceof Error ? err.message : String(err)}` };
     }
 }
+function saveMemory(key, type, value, ctx) {
+    ctx.memoryUpdates.push({ key, type, value });
+    return { saved: true, key, type };
+}
 async function deployApp(appName, ctx) {
     const apps = await coolifyFetch('/applications', ctx);
     if (!Array.isArray(apps))
@@ -655,6 +757,13 @@ async function deployApp(appName, ctx) {
     const app = apps.find((a) => a.name?.toLowerCase() === appName.toLowerCase() || a.uuid === appName);
     if (!app)
         return { error: `App "${appName}" not found` };
+    // Block deployment to protected VIBN platform apps
+    if (PROTECTED_COOLIFY_APPS.has(app.uuid) || app.project_uuid === PROTECTED_COOLIFY_PROJECT) {
+        return {
+            error: `SECURITY: "${appName}" is a protected Vibn platform application. ` +
+                `Agents can only deploy user project apps, not platform infrastructure.`
+        };
+    }
     const result = await fetch(`${ctx.coolify.apiUrl}/api/v1/deploy?uuid=${app.uuid}&force=false`, {
         headers: { 'Authorization': `Bearer ${ctx.coolify.apiToken}` }
     });

@@ -1,5 +1,5 @@
 import { createLLM, toOAITools, LLMMessage } from './llm';
-import { ALL_TOOLS, executeTool, ToolContext } from './tools';
+import { ALL_TOOLS, executeTool, ToolContext, MemoryUpdate } from './tools';
 
 const MAX_TURNS = 20;
 
@@ -109,6 +109,10 @@ export interface ChatResult {
     turns: number;
     toolCalls: string[];
     model: string;
+    /** Updated conversation history — caller should persist this */
+    history: LLMMessage[];
+    /** Knowledge items the AI chose to save this turn */
+    memoryUpdates: MemoryUpdate[];
 }
 
 // ---------------------------------------------------------------------------