- Dockerfile now runs tsc during build so committed dist/ is never stale
- ChatResult interface was missing history[] and memoryUpdates[] fields
- Re-add missing MemoryUpdate import in orchestrator.ts
- Rebuild dist/ with all new fields included

Made-with: Cursor
81 lines
2.9 KiB
JavaScript
81 lines
2.9 KiB
JavaScript
"use strict";
|
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
exports.runAgent = runAgent;
|
|
const llm_1 = require("./llm");
|
|
const tools_1 = require("./tools");
|
|
const job_store_1 = require("./job-store");
|
|
const MAX_TURNS = 40;
|
|
/**
|
|
* Core agent execution loop — model-agnostic via the unified LLM client.
|
|
*
|
|
* Agents use their configured model tier (A/B/C) or a specific model ID.
|
|
* Tool calling uses OpenAI format throughout.
|
|
*/
|
|
async function runAgent(job, config, task, ctx) {
|
|
const llm = (0, llm_1.createLLM)(config.model, { temperature: 0.2 });
|
|
const oaiTools = (0, llm_1.toOAITools)(config.tools);
|
|
const history = [
|
|
{ role: 'user', content: task }
|
|
];
|
|
let toolCallCount = 0;
|
|
let turn = 0;
|
|
let finalText = '';
|
|
(0, job_store_1.updateJob)(job.id, { status: 'running', progress: `Starting ${config.name} (${llm.modelId})…` });
|
|
while (turn < MAX_TURNS) {
|
|
turn++;
|
|
const messages = [
|
|
{ role: 'system', content: config.systemPrompt },
|
|
...history
|
|
];
|
|
const response = await llm.chat(messages, oaiTools, 8192);
|
|
// Build assistant message for history
|
|
const assistantMsg = {
|
|
role: 'assistant',
|
|
content: response.content,
|
|
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
|
|
};
|
|
history.push(assistantMsg);
|
|
// No tool calls — agent is done
|
|
if (response.tool_calls.length === 0) {
|
|
finalText = response.content ?? '';
|
|
break;
|
|
}
|
|
// Execute tool calls
|
|
for (const tc of response.tool_calls) {
|
|
const fnName = tc.function.name;
|
|
let fnArgs = {};
|
|
try {
|
|
fnArgs = JSON.parse(tc.function.arguments || '{}');
|
|
}
|
|
catch { /* bad JSON */ }
|
|
toolCallCount++;
|
|
(0, job_store_1.updateJob)(job.id, {
|
|
progress: `Turn ${turn}: calling ${fnName}…`,
|
|
toolCalls: [...(job.toolCalls || []), {
|
|
turn,
|
|
tool: fnName,
|
|
args: fnArgs,
|
|
timestamp: new Date().toISOString()
|
|
}]
|
|
});
|
|
let result;
|
|
try {
|
|
result = await (0, tools_1.executeTool)(fnName, fnArgs, ctx);
|
|
}
|
|
catch (err) {
|
|
result = { error: err instanceof Error ? err.message : String(err) };
|
|
}
|
|
history.push({
|
|
role: 'tool',
|
|
tool_call_id: tc.id,
|
|
name: fnName,
|
|
content: typeof result === 'string' ? result : JSON.stringify(result)
|
|
});
|
|
}
|
|
}
|
|
if (turn >= MAX_TURNS && !finalText) {
|
|
finalText = `Agent hit the ${MAX_TURNS}-turn safety limit. Tool calls made: ${toolCallCount}.`;
|
|
}
|
|
return { finalText, toolCallCount, turns: turn, model: llm.modelId };
|
|
}
|