/**
 * Firebase was not configured, so every chat request crashed with
 * 'Firebase Admin credentials not configured'. Migration to Postgres:
 * - chat-mode-resolver.ts: read project phase from fs_projects (Postgres)
 * - chat-context.ts: load project data from fs_projects instead of Firestore
 * - /api/ai/conversation: store/retrieve conversations in the
 *   chat_conversations Postgres table (created automatically on first use)
 * - /api/ai/chat: replace all Firestore reads/writes with Postgres queries
 * - v_ai_chat/page.tsx: replace Firebase client auth with useSession from
 *   next-auth/react; remove Firestore listeners, use REST API for project data
 *
 * Co-authored-by: Cursor <cursoragent@cursor.com>
 */
import { NextResponse } from 'next/server';
|
|
import { z } from 'zod';
|
|
import { GeminiLlmClient } from '@/lib/ai/gemini-client';
|
|
import type { LlmClient } from '@/lib/ai/llm-client';
|
|
import { query } from '@/lib/db-postgres';
|
|
import { MODE_SYSTEM_PROMPTS, ChatMode } from '@/lib/ai/chat-modes';
|
|
import { resolveChatMode } from '@/lib/server/chat-mode-resolver';
|
|
import {
|
|
buildProjectContextForChat,
|
|
determineArtifactsUsed,
|
|
formatContextForPrompt,
|
|
} from '@/lib/server/chat-context';
|
|
import { logProjectEvent } from '@/lib/server/logs';
|
|
import type { CollectorPhaseHandoff } from '@/lib/types/phase-handoff';
|
|
|
|
// Next.js route segment config.
// Increase timeout for Gemini 3 Pro thinking mode (can take 1-2 minutes)
export const maxDuration = 180; // 3 minutes
// Every request reads Postgres and calls the LLM, so never statically cache.
export const dynamic = 'force-dynamic';
|
|
|
|
const ChatReplySchema = z.object({
|
|
reply: z.string(),
|
|
visionAnswers: z.object({
|
|
q1: z.string().optional(), // Answer to question 1
|
|
q2: z.string().optional(), // Answer to question 2
|
|
q3: z.string().optional(), // Answer to question 3
|
|
allAnswered: z.boolean().optional(), // True when all 3 are complete
|
|
}).optional(),
|
|
collectorHandoff: z.object({
|
|
hasDocuments: z.boolean().optional(),
|
|
documentCount: z.number().optional(),
|
|
githubConnected: z.boolean().optional(),
|
|
githubRepo: z.string().optional(),
|
|
extensionLinked: z.boolean().optional(),
|
|
extensionDeclined: z.boolean().optional(),
|
|
noGithubYet: z.boolean().optional(),
|
|
readyForExtraction: z.boolean().optional(),
|
|
}).optional(),
|
|
extractionReviewHandoff: z.object({
|
|
extractionApproved: z.boolean().optional(),
|
|
readyForVision: z.boolean().optional(),
|
|
}).optional(),
|
|
});
|
|
|
|
// JSON body accepted by POST /api/ai/chat. All fields are optional at the
// type level; the handler validates that projectId and message are present.
interface ChatRequestBody {
  projectId?: string; // fs_projects.id the chat belongs to
  message?: string; // the user's new chat message
  overrideMode?: ChatMode; // force a specific mode instead of resolving from project phase
}
|
|
|
|
// Idempotent DDL so no migration step is needed: the conversation table is
// created on first use. One row per project; `messages` is a JSONB array of
// { role, content, createdAt } objects appended to over time.
const ENSURE_CONV_TABLE = `
CREATE TABLE IF NOT EXISTS chat_conversations (
  project_id text PRIMARY KEY,
  messages jsonb NOT NULL DEFAULT '[]',
  updated_at timestamptz NOT NULL DEFAULT NOW()
)
`;
|
|
|
|
async function appendConversation(
|
|
projectId: string,
|
|
newMessages: Array<{ role: 'user' | 'assistant'; content: string }>,
|
|
) {
|
|
await query(ENSURE_CONV_TABLE);
|
|
const now = new Date().toISOString();
|
|
const stamped = newMessages.map((m) => ({ ...m, createdAt: now }));
|
|
|
|
await query(
|
|
`INSERT INTO chat_conversations (project_id, messages, updated_at)
|
|
VALUES ($1, $2::jsonb, NOW())
|
|
ON CONFLICT (project_id) DO UPDATE
|
|
SET messages = chat_conversations.messages || $2::jsonb,
|
|
updated_at = NOW()`,
|
|
[projectId, JSON.stringify(stamped)]
|
|
);
|
|
}
|
|
|
|
export async function POST(request: Request) {
|
|
try {
|
|
const body = (await request.json()) as ChatRequestBody;
|
|
const projectId = body.projectId?.trim();
|
|
const message = body.message?.trim();
|
|
|
|
if (!projectId || !message) {
|
|
return NextResponse.json({ error: 'projectId and message are required' }, { status: 400 });
|
|
}
|
|
|
|
// Verify project exists in Postgres
|
|
const projectRows = await query<{ data: any }>(
|
|
`SELECT data FROM fs_projects WHERE id = $1 LIMIT 1`,
|
|
[projectId]
|
|
);
|
|
if (projectRows.length === 0) {
|
|
return NextResponse.json({ error: 'Project not found' }, { status: 404 });
|
|
}
|
|
const projectData = projectRows[0].data ?? {};
|
|
|
|
// Resolve chat mode (uses new resolver)
|
|
const resolvedMode = body.overrideMode ?? await resolveChatMode(projectId);
|
|
console.log(`[AI Chat] Mode: ${resolvedMode}`);
|
|
|
|
// Build comprehensive context with vector retrieval
|
|
// Only include GitHub analysis for MVP generation (not needed for vision questions)
|
|
const context = await buildProjectContextForChat(projectId, resolvedMode, message, {
|
|
retrievalLimit: 10,
|
|
includeVectorSearch: true,
|
|
includeGitHubAnalysis: resolvedMode === 'mvp_mode', // Only load repo analysis when generating MVP
|
|
});
|
|
|
|
console.log(`[AI Chat] Context built: ${context.retrievedChunks.length} vector chunks retrieved`);
|
|
|
|
// Get mode-specific system prompt
|
|
const systemPrompt = MODE_SYSTEM_PROMPTS[resolvedMode];
|
|
|
|
// Format context for LLM
|
|
const contextSummary = formatContextForPrompt(context);
|
|
|
|
// Prepare enhanced system prompt with context
|
|
const enhancedSystemPrompt = `${systemPrompt}
|
|
|
|
## Current Project Context
|
|
|
|
${contextSummary}
|
|
|
|
---
|
|
|
|
You have access to:
|
|
- Project artifacts (product model, MVP plan, marketing plan)
|
|
- Knowledge items (${context.knowledgeSummary.totalCount} total)
|
|
- Extraction signals (${context.extractionSummary.totalCount} analyzed)
|
|
${context.retrievedChunks.length > 0 ? `- ${context.retrievedChunks.length} relevant chunks from vector search (most similar to user's query)` : ''}
|
|
${context.repositoryAnalysis ? `- GitHub repository analysis (${context.repositoryAnalysis.totalFiles} files)` : ''}
|
|
${context.sessionHistory.totalSessions > 0 ? `- Complete Cursor session history (${context.sessionHistory.totalSessions} sessions, ${context.sessionHistory.messages.length} messages in chronological order)` : ''}
|
|
|
|
Use this context to provide specific, grounded responses. The session history shows your complete conversation history with the user - use it to understand what has been built and discussed.`;
|
|
|
|
// Load existing conversation history from Postgres
|
|
await query(ENSURE_CONV_TABLE);
|
|
const convRows = await query<{ messages: any[] }>(
|
|
`SELECT messages FROM chat_conversations WHERE project_id = $1`,
|
|
[projectId]
|
|
);
|
|
const conversationHistory: any[] = convRows[0]?.messages ?? [];
|
|
|
|
// Build full message context (history + current message)
|
|
const messages = [
|
|
...conversationHistory.map((msg: any) => ({
|
|
role: msg.role as 'user' | 'assistant',
|
|
content: msg.content as string,
|
|
})),
|
|
{
|
|
role: 'user' as const,
|
|
content: message,
|
|
},
|
|
];
|
|
|
|
console.log(`[AI Chat] Sending ${messages.length} messages to LLM (${conversationHistory.length} from history + 1 new)`);
|
|
console.log(`[AI Chat] Mode: ${resolvedMode}, Phase: ${projectData.currentPhase}, Has extraction: ${!!context.phaseHandoffs?.extraction}`);
|
|
|
|
// Log system prompt length
|
|
console.log(`[AI Chat] System prompt length: ${enhancedSystemPrompt.length} chars (~${Math.ceil(enhancedSystemPrompt.length / 4)} tokens)`);
|
|
|
|
// Log each message length
|
|
messages.forEach((msg, i) => {
|
|
console.log(`[AI Chat] Message ${i + 1} (${msg.role}): ${msg.content.length} chars (~${Math.ceil(msg.content.length / 4)} tokens)`);
|
|
});
|
|
|
|
const totalInputChars = enhancedSystemPrompt.length + messages.reduce((sum, msg) => sum + msg.content.length, 0);
|
|
console.log(`[AI Chat] Total input: ${totalInputChars} chars (~${Math.ceil(totalInputChars / 4)} tokens)`);
|
|
|
|
// Log system prompt preview (first 500 chars)
|
|
console.log(`[AI Chat] System prompt preview: ${enhancedSystemPrompt.substring(0, 500)}...`);
|
|
|
|
// Log last user message
|
|
const lastUserMsg = messages[messages.length - 1];
|
|
console.log(`[AI Chat] User message: ${lastUserMsg.content}`);
|
|
|
|
// Safety check: extraction_review_mode requires extraction results
|
|
if (resolvedMode === 'extraction_review_mode' && !context.phaseHandoffs?.extraction) {
|
|
console.warn(`[AI Chat] WARNING: extraction_review_mode active but no extraction results found for project ${projectId}`);
|
|
}
|
|
|
|
const llm: LlmClient = new GeminiLlmClient();
|
|
|
|
// Configure thinking mode based on task complexity
|
|
// Simple modes (collector, extraction_review) don't need deep thinking
|
|
// Complex modes (mvp, vision) benefit from extended reasoning
|
|
const needsThinking = resolvedMode === 'mvp_mode' || resolvedMode === 'vision_mode';
|
|
|
|
const reply = await llm.structuredCall<{
|
|
reply: string;
|
|
visionAnswers?: {
|
|
q1?: string;
|
|
q2?: string;
|
|
q3?: string;
|
|
allAnswered?: boolean;
|
|
};
|
|
collectorHandoff?: {
|
|
hasDocuments?: boolean;
|
|
documentCount?: number;
|
|
githubConnected?: boolean;
|
|
githubRepo?: string;
|
|
extensionLinked?: boolean;
|
|
extensionDeclined?: boolean;
|
|
noGithubYet?: boolean;
|
|
readyForExtraction?: boolean;
|
|
};
|
|
extractionReviewHandoff?: {
|
|
extractionApproved?: boolean;
|
|
readyForVision?: boolean;
|
|
};
|
|
}>({
|
|
model: 'gemini',
|
|
systemPrompt: enhancedSystemPrompt,
|
|
messages: messages, // Full conversation history!
|
|
schema: ChatReplySchema,
|
|
temperature: 0.4,
|
|
thinking_config: needsThinking ? {
|
|
thinking_level: 'high',
|
|
include_thoughts: false,
|
|
} : undefined,
|
|
});
|
|
|
|
// Store all vision answers when provided
|
|
if (reply.visionAnswers) {
|
|
const updates: any = {};
|
|
|
|
if (reply.visionAnswers.q1) {
|
|
updates['visionAnswers.q1'] = reply.visionAnswers.q1;
|
|
console.log('[AI Chat] Storing vision answer Q1');
|
|
}
|
|
if (reply.visionAnswers.q2) {
|
|
updates['visionAnswers.q2'] = reply.visionAnswers.q2;
|
|
console.log('[AI Chat] Storing vision answer Q2');
|
|
}
|
|
if (reply.visionAnswers.q3) {
|
|
updates['visionAnswers.q3'] = reply.visionAnswers.q3;
|
|
console.log('[AI Chat] Storing vision answer Q3');
|
|
}
|
|
|
|
// If all answers are complete, trigger MVP generation
|
|
if (reply.visionAnswers.allAnswered) {
|
|
updates['visionAnswers.allAnswered'] = true;
|
|
updates['readyForMVP'] = true;
|
|
console.log('[AI Chat] ✅ All 3 vision answers complete - ready for MVP generation');
|
|
}
|
|
|
|
if (Object.keys(updates).length > 0) {
|
|
updates['visionAnswers.updatedAt'] = new Date().toISOString();
|
|
|
|
await query(
|
|
`UPDATE fs_projects
|
|
SET data = data || $1::jsonb
|
|
WHERE id = $2`,
|
|
[JSON.stringify({ visionAnswers: updates }), projectId]
|
|
).catch((error) => {
|
|
console.error('[ai/chat] Failed to store vision answers', error);
|
|
});
|
|
}
|
|
}
|
|
|
|
// Best-effort: append this turn to the persisted conversation history
|
|
appendConversation(projectId, [
|
|
{ role: 'user', content: message },
|
|
{ role: 'assistant', content: reply.reply },
|
|
]).catch((error) => {
|
|
console.error('[ai/chat] Failed to append conversation history', error);
|
|
});
|
|
|
|
// If in collector mode, always update handoff state based on actual project context
|
|
// This ensures the checklist updates even if AI doesn't return collectorHandoff
|
|
if (resolvedMode === 'collector_mode') {
|
|
// Derive handoff state from actual project context
|
|
const hasDocuments = (context.knowledgeSummary.bySourceType['imported_document'] ?? 0) > 0;
|
|
const documentCount = context.knowledgeSummary.bySourceType['imported_document'] ?? 0;
|
|
const githubConnected = !!context.project.githubRepo;
|
|
const extensionLinked = context.project.extensionLinked ?? false;
|
|
|
|
// Check if AI indicated readiness (from reply if provided, otherwise check reply text)
|
|
let readyForExtraction = reply.collectorHandoff?.readyForExtraction ?? false;
|
|
|
|
// Fallback: If AI says certain phrases, assume user confirmed readiness
|
|
// IMPORTANT: These phrases must be SPECIFIC to avoid false positives
|
|
if (!readyForExtraction && reply.reply) {
|
|
const replyLower = reply.reply.toLowerCase();
|
|
|
|
// Check for explicit analysis/digging phrases (not just "perfect!")
|
|
const analysisKeywords = ['analyze', 'analyzing', 'digging', 'extraction', 'processing'];
|
|
const hasAnalysisKeyword = analysisKeywords.some(keyword => replyLower.includes(keyword));
|
|
|
|
// Only trigger if AI mentions BOTH readiness AND analysis action
|
|
if (hasAnalysisKeyword) {
|
|
const confirmPhrases = [
|
|
'let me analyze what you',
|
|
'i\'ll start digging into',
|
|
'i\'m starting the analysis',
|
|
'running the extraction',
|
|
'processing what you\'ve shared',
|
|
];
|
|
readyForExtraction = confirmPhrases.some(phrase => replyLower.includes(phrase));
|
|
|
|
if (readyForExtraction) {
|
|
console.log(`[AI Chat] Detected readiness from AI reply text: "${reply.reply.substring(0, 100)}"`);
|
|
}
|
|
}
|
|
}
|
|
|
|
const handoff: CollectorPhaseHandoff = {
|
|
phase: 'collector',
|
|
readyForNextPhase: readyForExtraction,
|
|
confidence: readyForExtraction ? 0.9 : 0.5,
|
|
confirmed: {
|
|
hasDocuments,
|
|
documentCount,
|
|
githubConnected,
|
|
githubRepo: context.project.githubRepo ?? undefined,
|
|
extensionLinked,
|
|
},
|
|
uncertain: {
|
|
extensionDeclined: reply.collectorHandoff?.extensionDeclined ?? false,
|
|
noGithubYet: reply.collectorHandoff?.noGithubYet ?? false,
|
|
},
|
|
missing: [],
|
|
questionsForUser: [],
|
|
sourceEvidence: [],
|
|
version: '1.0',
|
|
timestamp: new Date().toISOString(),
|
|
};
|
|
|
|
// Persist to project phaseData in Postgres
|
|
await query(
|
|
`UPDATE fs_projects
|
|
SET data = jsonb_set(
|
|
data,
|
|
'{phaseData,phaseHandoffs,collector}',
|
|
$1::jsonb,
|
|
true
|
|
)
|
|
WHERE id = $2`,
|
|
[JSON.stringify(handoff), projectId]
|
|
).catch((error) => {
|
|
console.error('[ai/chat] Failed to persist collector handoff', error);
|
|
});
|
|
|
|
console.log(`[AI Chat] Collector handoff persisted:`, {
|
|
hasDocuments: handoff.confirmed.hasDocuments,
|
|
githubConnected: handoff.confirmed.githubConnected,
|
|
extensionLinked: handoff.confirmed.extensionLinked,
|
|
readyForExtraction: handoff.readyForNextPhase,
|
|
});
|
|
|
|
// Auto-transition to extraction phase if ready
|
|
if (handoff.readyForNextPhase) {
|
|
console.log(`[AI Chat] Collector complete - triggering backend extraction`);
|
|
|
|
// Mark collector as complete
|
|
await query(
|
|
`UPDATE fs_projects
|
|
SET data = jsonb_set(data, '{phaseData,collectorCompletedAt}', $1::jsonb, true)
|
|
WHERE id = $2`,
|
|
[JSON.stringify(new Date().toISOString()), projectId]
|
|
).catch((error) => {
|
|
console.error('[ai/chat] Failed to mark collector complete', error);
|
|
});
|
|
|
|
// Trigger backend extraction (async - don't await)
|
|
import('@/lib/server/backend-extractor').then(({ runBackendExtractionForProject }) => {
|
|
runBackendExtractionForProject(projectId).catch((error) => {
|
|
console.error(`[AI Chat] Backend extraction failed for project ${projectId}:`, error);
|
|
});
|
|
});
|
|
}
|
|
}
|
|
|
|
// Handle extraction review → vision phase transition
|
|
if (resolvedMode === 'extraction_review_mode') {
|
|
// Check if AI indicated extraction is approved and ready for vision
|
|
let readyForVision = reply.extractionReviewHandoff?.readyForVision ?? false;
|
|
|
|
// Fallback: Check reply text for approval phrases
|
|
if (!readyForVision && reply.reply) {
|
|
const replyLower = reply.reply.toLowerCase();
|
|
|
|
// Check for vision transition phrases
|
|
const visionKeywords = ['vision', 'mvp', 'roadmap', 'plan'];
|
|
const hasVisionKeyword = visionKeywords.some(keyword => replyLower.includes(keyword));
|
|
|
|
if (hasVisionKeyword) {
|
|
const confirmPhrases = [
|
|
'ready to move to',
|
|
'ready for vision',
|
|
'let\'s move to vision',
|
|
'moving to vision',
|
|
'great! let\'s define',
|
|
'perfect! now let\'s',
|
|
];
|
|
readyForVision = confirmPhrases.some(phrase => replyLower.includes(phrase));
|
|
|
|
if (readyForVision) {
|
|
console.log(`[AI Chat] Detected vision readiness from AI reply text: "${reply.reply.substring(0, 100)}"`);
|
|
}
|
|
}
|
|
}
|
|
|
|
if (readyForVision) {
|
|
console.log(`[AI Chat] Extraction review complete - transitioning to vision phase`);
|
|
|
|
// Mark extraction review as complete and transition to vision
|
|
await query(
|
|
`UPDATE fs_projects
|
|
SET data = data
|
|
|| '{"currentPhase":"vision","phaseStatus":"in_progress"}'::jsonb
|
|
|| jsonb_build_object('phaseData',
|
|
(data->'phaseData') || jsonb_build_object(
|
|
'extractionReviewCompletedAt', $1::text
|
|
)
|
|
)
|
|
WHERE id = $2`,
|
|
[new Date().toISOString(), projectId]
|
|
).catch((error) => {
|
|
console.error('[ai/chat] Failed to transition to vision phase', error);
|
|
});
|
|
}
|
|
}
|
|
|
|
// Save conversation history to Postgres
|
|
await appendConversation(projectId, [
|
|
{ role: 'user', content: message },
|
|
{ role: 'assistant', content: reply.reply },
|
|
]).catch((error) => {
|
|
console.error('[ai/chat] Failed to save conversation history', error);
|
|
});
|
|
|
|
console.log(`[AI Chat] Conversation history saved (+2 messages)`);
|
|
|
|
// Determine which artifacts were used
|
|
const artifactsUsed = determineArtifactsUsed(context);
|
|
|
|
// Log successful interaction
|
|
logProjectEvent({
|
|
projectId,
|
|
userId: projectData.userId ?? null,
|
|
eventType: 'chat_interaction',
|
|
mode: resolvedMode,
|
|
phase: projectData.currentPhase ?? null,
|
|
artifactsUsed,
|
|
usedVectorSearch: context.retrievedChunks.length > 0,
|
|
vectorChunkCount: context.retrievedChunks.length,
|
|
promptVersion: '2.0', // Updated with vector search
|
|
modelUsed: process.env.VERTEX_AI_MODEL || 'gemini-3-pro-preview',
|
|
success: true,
|
|
errorMessage: null,
|
|
metadata: {
|
|
knowledgeCount: context.knowledgeSummary.totalCount,
|
|
extractionCount: context.extractionSummary.totalCount,
|
|
hasGithubRepo: !!context.repositoryAnalysis,
|
|
},
|
|
}).catch((err) => console.error('[ai/chat] Failed to log event:', err));
|
|
|
|
return NextResponse.json({
|
|
reply: reply.reply,
|
|
mode: resolvedMode,
|
|
projectPhase: projectData.currentPhase ?? null,
|
|
artifactsUsed,
|
|
usedVectorSearch: context.retrievedChunks.length > 0,
|
|
});
|
|
} catch (error) {
|
|
console.error('[ai/chat] Error handling chat request', error);
|
|
|
|
// Log error (best-effort) - extract projectId from request body if available
|
|
const errorProjectId = typeof (error as { projectId?: string })?.projectId === 'string'
|
|
? (error as { projectId: string }).projectId
|
|
: null;
|
|
|
|
if (errorProjectId) {
|
|
logProjectEvent({
|
|
projectId: errorProjectId,
|
|
userId: null,
|
|
eventType: 'error',
|
|
mode: null,
|
|
phase: null,
|
|
artifactsUsed: [],
|
|
usedVectorSearch: false,
|
|
promptVersion: '2.0',
|
|
modelUsed: process.env.VERTEX_AI_MODEL || 'gemini-3-pro-preview',
|
|
success: false,
|
|
errorMessage: error instanceof Error ? error.message : String(error),
|
|
}).catch((err) => console.error('[ai/chat] Failed to log error:', err));
|
|
}
|
|
|
|
return NextResponse.json(
|
|
{
|
|
error: 'Failed to process chat message',
|
|
details: error instanceof Error ? error.message : String(error),
|
|
},
|
|
{ status: 500 },
|
|
);
|
|
}
|
|
}
|
|
|
|
|