fix: compile dist from source in Docker, fix ChatResult interface
- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
This commit is contained in:
125
dist/agent-runner.js
vendored
125
dist/agent-runner.js
vendored
@@ -1,117 +1,80 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.runAgent = runAgent;
|
||||
const genai_1 = require("@google/genai");
|
||||
const llm_1 = require("./llm");
|
||||
const tools_1 = require("./tools");
|
||||
const job_store_1 = require("./job-store");
|
||||
const MAX_TURNS = 40; // safety cap — prevents infinite loops
|
||||
const MAX_TURNS = 40;
|
||||
/**
|
||||
* Core Gemini agent loop.
|
||||
* Core agent execution loop — model-agnostic via the unified LLM client.
|
||||
*
|
||||
* Sends the task to Gemini with the agent's system prompt and tools,
|
||||
* then loops: execute tool calls → send results back → repeat until
|
||||
* the model stops calling tools or MAX_TURNS is reached.
|
||||
* Agents use their configured model tier (A/B/C) or a specific model ID.
|
||||
* Tool calling uses OpenAI format throughout.
|
||||
*/
|
||||
async function runAgent(job, config, task, ctx) {
|
||||
const apiKey = process.env.GOOGLE_API_KEY;
|
||||
if (!apiKey) {
|
||||
throw new Error('GOOGLE_API_KEY environment variable is not set');
|
||||
}
|
||||
const genai = new genai_1.GoogleGenAI({ apiKey });
|
||||
// Build Gemini function declarations from our tool definitions
|
||||
const functionDeclarations = config.tools.map(tool => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: tool.parameters
|
||||
}));
|
||||
const tools = functionDeclarations.length > 0
|
||||
? [{ functionDeclarations }]
|
||||
: [];
|
||||
const model = genai.models;
|
||||
// Build conversation history
|
||||
const history = [];
|
||||
// Initial user message
|
||||
let currentMessage = {
|
||||
role: 'user',
|
||||
parts: [{ text: task }]
|
||||
};
|
||||
const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
|
||||
const oaiTools = (0, llm_1.toOAITools)(config.tools);
|
||||
const history = [
|
||||
{ role: 'user', content: task }
|
||||
];
|
||||
let toolCallCount = 0;
|
||||
let turn = 0;
|
||||
let finalText = '';
|
||||
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} agent...` });
|
||||
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} (${llm.modelId})…` });
|
||||
while (turn < MAX_TURNS) {
|
||||
turn++;
|
||||
// Add current message to history
|
||||
history.push(currentMessage);
|
||||
// Call Gemini
|
||||
const response = await model.generateContent({
|
||||
model: config.model || 'gemini-2.0-flash',
|
||||
contents: history,
|
||||
config: {
|
||||
systemInstruction: config.systemPrompt,
|
||||
tools: tools.length > 0 ? tools : undefined,
|
||||
temperature: 0.2,
|
||||
maxOutputTokens: 8192
|
||||
}
|
||||
});
|
||||
const candidate = response.candidates?.[0];
|
||||
if (!candidate) {
|
||||
throw new Error('No response from Gemini');
|
||||
}
|
||||
// Add model response to history
|
||||
const modelContent = {
|
||||
role: 'model',
|
||||
parts: candidate.content?.parts || []
|
||||
const messages = [
|
||||
{ role: 'system', content: config.systemPrompt },
|
||||
...history
|
||||
];
|
||||
const response = await llm.chat(messages, oaiTools, 8192);
|
||||
// Build assistant message for history
|
||||
const assistantMsg = {
|
||||
role: 'assistant',
|
||||
content: response.content,
|
||||
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
|
||||
};
|
||||
history.push(modelContent);
|
||||
// Extract function calls from the response
|
||||
const functionCalls = candidate.content?.parts?.filter(p => p.functionCall) ?? [];
|
||||
if (functionCalls.length === 0) {
|
||||
// No tool calls — the agent is done
|
||||
finalText = candidate.content?.parts
|
||||
?.filter(p => p.text)
|
||||
.map(p => p.text)
|
||||
.join('') ?? '';
|
||||
history.push(assistantMsg);
|
||||
// No tool calls — agent is done
|
||||
if (response.tool_calls.length === 0) {
|
||||
finalText = response.content ?? '';
|
||||
break;
|
||||
}
|
||||
// Execute all tool calls
|
||||
const toolResultParts = [];
|
||||
for (const part of functionCalls) {
|
||||
const call = part.functionCall;
|
||||
const callName = call.name ?? 'unknown';
|
||||
const callArgs = (call.args ?? {});
|
||||
// Execute tool calls
|
||||
for (const tc of response.tool_calls) {
|
||||
const fnName = tc.function.name;
|
||||
let fnArgs = {};
|
||||
try {
|
||||
fnArgs = JSON.parse(tc.function.arguments || '{}');
|
||||
}
|
||||
catch { /* bad JSON */ }
|
||||
toolCallCount++;
|
||||
(0, job_store_1.updateJob)(job.id, {
|
||||
progress: `Turn ${turn}: calling ${callName}...`,
|
||||
progress: `Turn ${turn}: calling ${fnName}…`,
|
||||
toolCalls: [...(job.toolCalls || []), {
|
||||
turn,
|
||||
tool: callName,
|
||||
args: callArgs,
|
||||
tool: fnName,
|
||||
args: fnArgs,
|
||||
timestamp: new Date().toISOString()
|
||||
}]
|
||||
});
|
||||
let result;
|
||||
try {
|
||||
result = await (0, tools_1.executeTool)(callName, callArgs, ctx);
|
||||
result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
|
||||
}
|
||||
catch (err) {
|
||||
result = { error: err instanceof Error ? err.message : String(err) };
|
||||
}
|
||||
toolResultParts.push({
|
||||
functionResponse: {
|
||||
name: callName,
|
||||
response: { result }
|
||||
}
|
||||
history.push({
|
||||
role: 'tool',
|
||||
tool_call_id: tc.id,
|
||||
name: fnName,
|
||||
content: typeof result === 'string' ? result : JSON.stringify(result)
|
||||
});
|
||||
}
|
||||
// Next turn: send tool results back to the model
|
||||
currentMessage = {
|
||||
role: 'user',
|
||||
parts: toolResultParts
|
||||
};
|
||||
}
|
||||
if (turn >= MAX_TURNS && !finalText) {
|
||||
finalText = `Agent reached the ${MAX_TURNS}-turn safety limit. Last tool call count: ${toolCallCount}.`;
|
||||
finalText = `Agent hit the ${MAX_TURNS}-turn safety limit. Tool calls made: ${toolCallCount}.`;
|
||||
}
|
||||
return { finalText, toolCallCount, turns: turn };
|
||||
return { finalText, toolCallCount, turns: turn, model: llm.modelId };
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user