refactor: split tools.ts into registry-based domain files

Replaces the single 800-line tools.ts and its switch dispatcher with a
Theia-inspired registry pattern — each tool domain is its own file, and
dispatch is a plain Map.get() call with no central routing function.

New structure in src/tools/:
  registry.ts   — ToolDefinition (with handler), registerTool(), executeTool(), ALL_TOOLS
  context.ts    — ToolContext, MemoryUpdate interfaces
  security.ts   — PROTECTED_* constants + assertGiteaWritable/assertCoolifyDeployable
  utils.ts      — safeResolve(), EXCLUDED set
  file.ts       — read_file, write_file, replace_in_file, list_directory, find_files, search_code
  shell.ts      — execute_command
  git.ts        — git_commit_and_push
  coolify.ts    — coolify_*, list_all_apps, get_app_status, deploy_app
  gitea.ts      — gitea_*, list_repos, list_all_issues, read_repo_file
  agent.ts      — spawn_agent, get_job_status
  memory.ts     — save_memory
  index.ts      — barrel with side-effect imports + re-exports

Adding a new tool now requires only a new file plus a registerTool() call —
no switch statement to extend and no shared array to edit. The external API is unchanged.

Made-with: Cursor
This commit is contained in:
2026-03-01 15:27:29 -08:00
parent 7a601b57b8
commit e91e5e0e37
38 changed files with 1854 additions and 810 deletions

35
dist/orchestrator.js vendored
View File

@@ -102,10 +102,14 @@ async function orchestratorChat(sessionId, userMessage, ctx, opts) {
let finalReply = '';
let finalReasoning = null;
const toolCallNames = [];
// Build messages with system prompt prepended
// Build system prompt — inject project knowledge if provided
const systemContent = opts?.knowledgeContext
? `${SYSTEM_PROMPT}\n\n## Project Memory (known facts)\n${opts.knowledgeContext}`
: SYSTEM_PROMPT;
// Build messages with system prompt prepended; keep last 40 for cost control
const buildMessages = () => [
{ role: 'system', content: SYSTEM_PROMPT },
...session.history
{ role: 'system', content: systemContent },
...session.history.slice(-40)
];
while (turn < MAX_TURNS) {
turn++;
@@ -119,15 +123,20 @@ async function orchestratorChat(sessionId, userMessage, ctx, opts) {
// Record reasoning for the final turn (informational, not stored in history)
if (response.reasoning)
finalReasoning = response.reasoning;
// Build assistant message to add to history
const assistantMsg = {
role: 'assistant',
content: response.content,
tool_calls: response.tool_calls.length > 0 ? response.tool_calls : undefined
};
session.history.push(assistantMsg);
// Only push assistant message if it has actual content or tool calls;
// skip empty turns that result from mid-reasoning token exhaustion.
const hasContent = response.content !== null && response.content !== '';
const hasToolCalls = response.tool_calls.length > 0;
if (hasContent || hasToolCalls) {
const assistantMsg = {
role: 'assistant',
content: response.content,
tool_calls: hasToolCalls ? response.tool_calls : undefined
};
session.history.push(assistantMsg);
}
// No tool calls — we have the final answer
if (response.tool_calls.length === 0) {
if (!hasToolCalls) {
finalReply = response.content ?? '';
break;
}
@@ -166,7 +175,9 @@ async function orchestratorChat(sessionId, userMessage, ctx, opts) {
turns: turn,
toolCalls: toolCallNames,
model: llm.modelId,
history: session.history.slice(-40),
history: session.history
.filter(m => m.role !== 'assistant' || m.content || m.tool_calls?.length)
.slice(-40),
memoryUpdates: ctx.memoryUpdates
};
}