diff --git a/src/atlas.ts b/src/atlas.ts
index 1871ca9..8016904 100644
--- a/src/atlas.ts
+++ b/src/atlas.ts
@@ -88,14 +88,23 @@ export async function atlasChat(
   const oaiTools = toOAITools(ATLAS_TOOLS);
   const systemPrompt = resolvePrompt('atlas');
 
-  // For init triggers, don't add the synthetic prompt as a user turn
-  if (!opts?.isInit) {
-    session.history.push({ role: 'user', content: userMessage });
-  }
+  // Always push the user message so Gemini gets a valid conversation (requires at least one user turn).
+  // For init triggers, we mark it so we can strip it from the returned history — it's an internal
+  // prompt, not a real user message, and shouldn't appear in the conversation UI or DB.
+  const INIT_MARKER = '__atlas_init_marker__';
+  session.history.push({
+    role: 'user',
+    content: opts?.isInit ? INIT_MARKER + userMessage : userMessage
+  });
 
   const buildMessages = (): LLMMessage[] => [
     { role: 'system', content: systemPrompt },
-    ...session.history.slice(-60)
+    ...session.history.slice(-60).map(m =>
+      // Strip the init marker before sending to the LLM
+      m.role === 'user' && typeof m.content === 'string' && m.content.startsWith(INIT_MARKER)
+        ? { ...m, content: m.content.slice(INIT_MARKER.length) }
+        : m
+    )
   ];
 
   let turn = 0;
@@ -156,6 +165,8 @@ export async function atlasChat(
     reply: finalReply,
     sessionId,
     history: session.history
+      // Drop the internal init user turn — it's not a real user message
+      .filter(m => !(m.role === 'user' && typeof m.content === 'string' && m.content.startsWith(INIT_MARKER)))
       .filter(m => m.role !== 'assistant' || m.content || m.tool_calls?.length)
      .slice(-60),
     prdContent,