VIBN Frontend for Coolify deployment

This commit is contained in:
2026-02-15 19:25:52 -08:00
commit 40bf8428cd
398 changed files with 76513 additions and 0 deletions

504
app/api/ai/chat/route.ts Normal file
View File

@@ -0,0 +1,504 @@
import { NextResponse } from 'next/server';
import { z } from 'zod';
import { GeminiLlmClient } from '@/lib/ai/gemini-client';
import type { LlmClient } from '@/lib/ai/llm-client';
import { getAdminDb } from '@/lib/firebase/admin';
import { FieldValue } from 'firebase-admin/firestore';
import { MODE_SYSTEM_PROMPTS, ChatMode } from '@/lib/ai/chat-modes';
import { resolveChatMode } from '@/lib/server/chat-mode-resolver';
import {
buildProjectContextForChat,
determineArtifactsUsed,
formatContextForPrompt,
} from '@/lib/server/chat-context';
import { logProjectEvent } from '@/lib/server/logs';
import type { CollectorPhaseHandoff } from '@/lib/types/phase-handoff';
// Increase timeout for Gemini 3 Pro thinking mode (can take 1-2 minutes)
export const maxDuration = 180; // 3 minutes
// Force dynamic rendering: every response depends on live Firestore state.
export const dynamic = 'force-dynamic';
// Structured reply contract for the LLM. Besides the user-facing `reply`
// text, the model may attach mode-specific payloads; all payloads are
// optional and are only expected in the corresponding chat mode.
const ChatReplySchema = z.object({
  reply: z.string(),
  // vision mode: progressively collected answers to the three vision questions.
  visionAnswers: z.object({
    q1: z.string().optional(), // Answer to question 1
    q2: z.string().optional(), // Answer to question 2
    q3: z.string().optional(), // Answer to question 3
    allAnswered: z.boolean().optional(), // True when all 3 are complete
  }).optional(),
  // collector mode: state the model believes has been gathered so far.
  collectorHandoff: z.object({
    hasDocuments: z.boolean().optional(),
    documentCount: z.number().optional(),
    githubConnected: z.boolean().optional(),
    githubRepo: z.string().optional(),
    extensionLinked: z.boolean().optional(),
    extensionDeclined: z.boolean().optional(),
    noGithubYet: z.boolean().optional(),
    readyForExtraction: z.boolean().optional(),
  }).optional(),
  // extraction review mode: whether the user approved the extraction output.
  extractionReviewHandoff: z.object({
    extractionApproved: z.boolean().optional(),
    readyForVision: z.boolean().optional(),
  }).optional(),
});
// Shape of the JSON body accepted by POST /api/ai/chat.
interface ChatRequestBody {
  projectId?: string; // Firestore `projects` document id
  message?: string; // The user's chat message
  overrideMode?: ChatMode; // Optional explicit mode, bypassing resolveChatMode
}
/**
 * Appends chat messages to the persisted conversation document for a project.
 *
 * Runs inside a Firestore transaction so concurrent appends do not clobber
 * each other's messages.
 *
 * @param projectId - Document id in the `chat_conversations` collection.
 * @param messages - New turn(s) to append, in order.
 */
async function appendConversation(
  projectId: string,
  messages: Array<{ role: 'user' | 'assistant'; content: string }>,
) {
  const adminDb = getAdminDb();
  const docRef = adminDb.collection('chat_conversations').doc(projectId);
  await adminDb.runTransaction(async (tx) => {
    const snapshot = await tx.get(docRef);
    // Guard against a missing or corrupted `messages` field: only spread a
    // real array (consistent with the Array.isArray validation used when
    // loading conversation history in POST).
    const stored = snapshot.exists ? snapshot.data()?.messages : [];
    const existing: unknown[] = Array.isArray(stored) ? stored : [];
    const now = new Date().toISOString();
    const newMessages = messages.map((m) => ({
      role: m.role,
      content: m.content,
      // Use a simple ISO string for message timestamps to avoid FieldValue
      // restrictions inside arrays.
      createdAt: now,
    }));
    tx.set(
      docRef,
      {
        projectId,
        messages: [...existing, ...newMessages],
        updatedAt: FieldValue.serverTimestamp(),
      },
      { merge: true },
    );
  });
}
/**
 * POST /api/ai/chat
 *
 * Main chat endpoint. Flow:
 *  1. Validate the request and confirm the project exists.
 *  2. Resolve the chat mode (request override wins over the resolver).
 *  3. Build project context (artifacts, knowledge, vector-retrieved chunks,
 *     optional GitHub analysis) and a mode-specific system prompt.
 *  4. Send the full conversation history plus the new message to Gemini.
 *  5. Persist structured side effects from the reply: vision answers,
 *     collector phase handoff (with auto-trigger of backend extraction),
 *     and the extraction-review → vision phase transition.
 *  6. Save the updated conversation and log the interaction.
 *
 * Returns `{ reply, mode, projectPhase, artifactsUsed, usedVectorSearch }`
 * on success; 400/404 for bad input; 500 with details on failure.
 */
export async function POST(request: Request) {
  try {
    const body = (await request.json()) as ChatRequestBody;
    const projectId = body.projectId?.trim();
    const message = body.message?.trim();
    if (!projectId || !message) {
      return NextResponse.json({ error: 'projectId and message are required' }, { status: 400 });
    }
    // Verify project exists
    const adminDb = getAdminDb();
    const projectSnapshot = await adminDb.collection('projects').doc(projectId).get();
    if (!projectSnapshot.exists) {
      return NextResponse.json({ error: 'Project not found' }, { status: 404 });
    }
    const projectData = projectSnapshot.data() ?? {};
    // Resolve chat mode (uses new resolver); an explicit overrideMode from
    // the request takes precedence.
    const resolvedMode = body.overrideMode ?? await resolveChatMode(projectId);
    console.log(`[AI Chat] Mode: ${resolvedMode}`);
    // Build comprehensive context with vector retrieval
    // Only include GitHub analysis for MVP generation (not needed for vision questions)
    const context = await buildProjectContextForChat(projectId, resolvedMode, message, {
      retrievalLimit: 10,
      includeVectorSearch: true,
      includeGitHubAnalysis: resolvedMode === 'mvp_mode', // Only load repo analysis when generating MVP
    });
    console.log(`[AI Chat] Context built: ${context.retrievedChunks.length} vector chunks retrieved`);
    // Get mode-specific system prompt
    const systemPrompt = MODE_SYSTEM_PROMPTS[resolvedMode];
    // Format context for LLM
    const contextSummary = formatContextForPrompt(context);
    // Prepare enhanced system prompt with context. NOTE: the template body
    // below is runtime prompt text; its lines intentionally start at column 0.
    const enhancedSystemPrompt = `${systemPrompt}
## Current Project Context
${contextSummary}
---
You have access to:
- Project artifacts (product model, MVP plan, marketing plan)
- Knowledge items (${context.knowledgeSummary.totalCount} total)
- Extraction signals (${context.extractionSummary.totalCount} analyzed)
${context.retrievedChunks.length > 0 ? `- ${context.retrievedChunks.length} relevant chunks from vector search (most similar to user's query)` : ''}
${context.repositoryAnalysis ? `- GitHub repository analysis (${context.repositoryAnalysis.totalFiles} files)` : ''}
${context.sessionHistory.totalSessions > 0 ? `- Complete Cursor session history (${context.sessionHistory.totalSessions} sessions, ${context.sessionHistory.messages.length} messages in chronological order)` : ''}
Use this context to provide specific, grounded responses. The session history shows your complete conversation history with the user - use it to understand what has been built and discussed.`;
    // Load existing conversation history
    const conversationDoc = await adminDb.collection('chat_conversations').doc(projectId).get();
    const conversationData = conversationDoc.exists ? conversationDoc.data() : null;
    const conversationHistory = Array.isArray(conversationData?.messages)
      ? conversationData.messages
      : [];
    // Build full message context (history + current message)
    const messages = [
      ...conversationHistory.map((msg: any) => ({
        role: msg.role as 'user' | 'assistant',
        content: msg.content as string,
      })),
      {
        role: 'user' as const,
        content: message,
      },
    ];
    console.log(`[AI Chat] Sending ${messages.length} messages to LLM (${conversationHistory.length} from history + 1 new)`);
    console.log(`[AI Chat] Mode: ${resolvedMode}, Phase: ${projectData.currentPhase}, Has extraction: ${!!context.phaseHandoffs?.extraction}`);
    // Log system prompt length (chars/4 is a rough token estimate)
    console.log(`[AI Chat] System prompt length: ${enhancedSystemPrompt.length} chars (~${Math.ceil(enhancedSystemPrompt.length / 4)} tokens)`);
    // Log each message length
    messages.forEach((msg, i) => {
      console.log(`[AI Chat] Message ${i + 1} (${msg.role}): ${msg.content.length} chars (~${Math.ceil(msg.content.length / 4)} tokens)`);
    });
    const totalInputChars = enhancedSystemPrompt.length + messages.reduce((sum, msg) => sum + msg.content.length, 0);
    console.log(`[AI Chat] Total input: ${totalInputChars} chars (~${Math.ceil(totalInputChars / 4)} tokens)`);
    // Log system prompt preview (first 500 chars)
    console.log(`[AI Chat] System prompt preview: ${enhancedSystemPrompt.substring(0, 500)}...`);
    // Log last user message
    const lastUserMsg = messages[messages.length - 1];
    console.log(`[AI Chat] User message: ${lastUserMsg.content}`);
    // Safety check: extraction_review_mode requires extraction results
    if (resolvedMode === 'extraction_review_mode' && !context.phaseHandoffs?.extraction) {
      console.warn(`[AI Chat] WARNING: extraction_review_mode active but no extraction results found for project ${projectId}`);
    }
    const llm: LlmClient = new GeminiLlmClient();
    // Configure thinking mode based on task complexity
    // Simple modes (collector, extraction_review) don't need deep thinking
    // Complex modes (mvp, vision) benefit from extended reasoning
    const needsThinking = resolvedMode === 'mvp_mode' || resolvedMode === 'vision_mode';
    // Structured LLM call; the type argument mirrors ChatReplySchema above.
    const reply = await llm.structuredCall<{
      reply: string;
      visionAnswers?: {
        q1?: string;
        q2?: string;
        q3?: string;
        allAnswered?: boolean;
      };
      collectorHandoff?: {
        hasDocuments?: boolean;
        documentCount?: number;
        githubConnected?: boolean;
        githubRepo?: string;
        extensionLinked?: boolean;
        extensionDeclined?: boolean;
        noGithubYet?: boolean;
        readyForExtraction?: boolean;
      };
      extractionReviewHandoff?: {
        extractionApproved?: boolean;
        readyForVision?: boolean;
      };
    }>({
      model: 'gemini',
      systemPrompt: enhancedSystemPrompt,
      messages: messages, // Full conversation history!
      schema: ChatReplySchema,
      temperature: 0.4,
      thinking_config: needsThinking ? {
        thinking_level: 'high',
        include_thoughts: false,
      } : undefined,
    });
    // Store all vision answers when provided
    if (reply.visionAnswers) {
      const updates: any = {};
      if (reply.visionAnswers.q1) {
        updates['visionAnswers.q1'] = reply.visionAnswers.q1;
        console.log('[AI Chat] Storing vision answer Q1');
      }
      if (reply.visionAnswers.q2) {
        updates['visionAnswers.q2'] = reply.visionAnswers.q2;
        console.log('[AI Chat] Storing vision answer Q2');
      }
      if (reply.visionAnswers.q3) {
        updates['visionAnswers.q3'] = reply.visionAnswers.q3;
        console.log('[AI Chat] Storing vision answer Q3');
      }
      // If all answers are complete, trigger MVP generation
      if (reply.visionAnswers.allAnswered) {
        updates['visionAnswers.allAnswered'] = true;
        updates['readyForMVP'] = true;
        console.log('[AI Chat] ✅ All 3 vision answers complete - ready for MVP generation');
      }
      if (Object.keys(updates).length > 0) {
        updates['visionAnswers.updatedAt'] = new Date().toISOString();
        // NOTE(review): set(..., { merge: true }) may treat dot-containing
        // keys as literal field names rather than nested paths (unlike
        // update()); confirm against Firestore Admin SDK semantics.
        await adminDb.collection('projects').doc(projectId).set(updates, { merge: true })
          .catch((error) => {
            console.error('[ai/chat] Failed to store vision answers', error);
          });
      }
    }
    // Best-effort: append this turn to the persisted conversation history.
    // NOTE(review): this turn is also written again below when the full
    // history is saved — the two writes can race and duplicate messages;
    // verify whether both persistence paths are intended.
    appendConversation(projectId, [
      { role: 'user', content: message },
      { role: 'assistant', content: reply.reply },
    ]).catch((error) => {
      console.error('[ai/chat] Failed to append conversation history', error);
    });
    // If in collector mode, always update handoff state based on actual project context
    // This ensures the checklist updates even if AI doesn't return collectorHandoff
    if (resolvedMode === 'collector_mode') {
      // Derive handoff state from actual project context
      const hasDocuments = (context.knowledgeSummary.bySourceType['imported_document'] ?? 0) > 0;
      const documentCount = context.knowledgeSummary.bySourceType['imported_document'] ?? 0;
      const githubConnected = !!context.project.githubRepo;
      const extensionLinked = context.project.extensionLinked ?? false;
      // Check if AI indicated readiness (from reply if provided, otherwise check reply text)
      let readyForExtraction = reply.collectorHandoff?.readyForExtraction ?? false;
      // Fallback: If AI says certain phrases, assume user confirmed readiness
      // IMPORTANT: These phrases must be SPECIFIC to avoid false positives
      if (!readyForExtraction && reply.reply) {
        const replyLower = reply.reply.toLowerCase();
        // Check for explicit analysis/digging phrases (not just "perfect!")
        const analysisKeywords = ['analyze', 'analyzing', 'digging', 'extraction', 'processing'];
        const hasAnalysisKeyword = analysisKeywords.some(keyword => replyLower.includes(keyword));
        // Only trigger if AI mentions BOTH readiness AND analysis action
        if (hasAnalysisKeyword) {
          const confirmPhrases = [
            'let me analyze what you',
            'i\'ll start digging into',
            'i\'m starting the analysis',
            'running the extraction',
            'processing what you\'ve shared',
          ];
          readyForExtraction = confirmPhrases.some(phrase => replyLower.includes(phrase));
          if (readyForExtraction) {
            console.log(`[AI Chat] Detected readiness from AI reply text: "${reply.reply.substring(0, 100)}"`);
          }
        }
      }
      // Assemble the collector handoff from derived + AI-provided signals.
      const handoff: CollectorPhaseHandoff = {
        phase: 'collector',
        readyForNextPhase: readyForExtraction,
        confidence: readyForExtraction ? 0.9 : 0.5,
        confirmed: {
          hasDocuments,
          documentCount,
          githubConnected,
          githubRepo: context.project.githubRepo ?? undefined,
          extensionLinked,
        },
        uncertain: {
          extensionDeclined: reply.collectorHandoff?.extensionDeclined ?? false,
          noGithubYet: reply.collectorHandoff?.noGithubYet ?? false,
        },
        missing: [],
        questionsForUser: [],
        sourceEvidence: [],
        version: '1.0',
        timestamp: new Date().toISOString(),
      };
      // Persist to project phaseData
      // NOTE(review): same dotted-key-with-set(merge) concern as above.
      await adminDb.collection('projects').doc(projectId).set(
        {
          'phaseData.phaseHandoffs.collector': handoff,
        },
        { merge: true }
      ).catch((error) => {
        console.error('[ai/chat] Failed to persist collector handoff', error);
      });
      console.log(`[AI Chat] Collector handoff persisted:`, {
        hasDocuments: handoff.confirmed.hasDocuments,
        githubConnected: handoff.confirmed.githubConnected,
        extensionLinked: handoff.confirmed.extensionLinked,
        readyForExtraction: handoff.readyForNextPhase,
      });
      // Auto-transition to extraction phase if ready
      if (handoff.readyForNextPhase) {
        console.log(`[AI Chat] Collector complete - triggering backend extraction`);
        // Mark collector as complete
        await adminDb.collection('projects').doc(projectId).update({
          'phaseData.collectorCompletedAt': new Date().toISOString(),
        }).catch((error) => {
          console.error('[ai/chat] Failed to mark collector complete', error);
        });
        // Trigger backend extraction (async - don't await)
        import('@/lib/server/backend-extractor').then(({ runBackendExtractionForProject }) => {
          runBackendExtractionForProject(projectId).catch((error) => {
            console.error(`[AI Chat] Backend extraction failed for project ${projectId}:`, error);
          });
        });
      }
    }
    // Handle extraction review → vision phase transition
    if (resolvedMode === 'extraction_review_mode') {
      // Check if AI indicated extraction is approved and ready for vision
      let readyForVision = reply.extractionReviewHandoff?.readyForVision ?? false;
      // Fallback: Check reply text for approval phrases
      if (!readyForVision && reply.reply) {
        const replyLower = reply.reply.toLowerCase();
        // Check for vision transition phrases
        const visionKeywords = ['vision', 'mvp', 'roadmap', 'plan'];
        const hasVisionKeyword = visionKeywords.some(keyword => replyLower.includes(keyword));
        if (hasVisionKeyword) {
          const confirmPhrases = [
            'ready to move to',
            'ready for vision',
            'let\'s move to vision',
            'moving to vision',
            'great! let\'s define',
            'perfect! now let\'s',
          ];
          readyForVision = confirmPhrases.some(phrase => replyLower.includes(phrase));
          if (readyForVision) {
            console.log(`[AI Chat] Detected vision readiness from AI reply text: "${reply.reply.substring(0, 100)}"`);
          }
        }
      }
      if (readyForVision) {
        console.log(`[AI Chat] Extraction review complete - transitioning to vision phase`);
        // Mark extraction review as complete and transition to vision
        await adminDb.collection('projects').doc(projectId).update({
          currentPhase: 'vision',
          phaseStatus: 'in_progress',
          'phaseData.extractionReviewCompletedAt': new Date().toISOString(),
        }).catch((error) => {
          console.error('[ai/chat] Failed to transition to vision phase', error);
        });
      }
    }
    // Save conversation history (overwrites `messages` with history + this turn)
    const newConversationHistory = [
      ...conversationHistory,
      {
        role: 'user' as const,
        content: message,
        createdAt: new Date().toISOString(),
      },
      {
        role: 'assistant' as const,
        content: reply.reply,
        createdAt: new Date().toISOString(),
      },
    ];
    await adminDb.collection('chat_conversations').doc(projectId).set(
      {
        projectId,
        userId: projectData.userId,
        messages: newConversationHistory,
        updatedAt: new Date().toISOString(),
      },
      { merge: true }
    ).catch((error) => {
      console.error('[ai/chat] Failed to save conversation history', error);
    });
    console.log(`[AI Chat] Conversation history saved (${newConversationHistory.length} total messages)`);
    // Determine which artifacts were used
    const artifactsUsed = determineArtifactsUsed(context);
    // Log successful interaction (best-effort, not awaited)
    logProjectEvent({
      projectId,
      userId: projectData.userId ?? null,
      eventType: 'chat_interaction',
      mode: resolvedMode,
      phase: projectData.currentPhase ?? null,
      artifactsUsed,
      usedVectorSearch: context.retrievedChunks.length > 0,
      vectorChunkCount: context.retrievedChunks.length,
      promptVersion: '2.0', // Updated with vector search
      modelUsed: process.env.VERTEX_AI_MODEL || 'gemini-3-pro-preview',
      success: true,
      errorMessage: null,
      metadata: {
        knowledgeCount: context.knowledgeSummary.totalCount,
        extractionCount: context.extractionSummary.totalCount,
        hasGithubRepo: !!context.repositoryAnalysis,
      },
    }).catch((err) => console.error('[ai/chat] Failed to log event:', err));
    return NextResponse.json({
      reply: reply.reply,
      mode: resolvedMode,
      projectPhase: projectData.currentPhase ?? null,
      artifactsUsed,
      usedVectorSearch: context.retrievedChunks.length > 0,
    });
  } catch (error) {
    console.error('[ai/chat] Error handling chat request', error);
    // Log error (best-effort) - extract projectId from request body if available
    // NOTE(review): plain Errors do not carry projectId, so this is usually
    // null unless a thrown object was decorated upstream — confirm.
    const errorProjectId = typeof (error as { projectId?: string })?.projectId === 'string'
      ? (error as { projectId: string }).projectId
      : null;
    if (errorProjectId) {
      logProjectEvent({
        projectId: errorProjectId,
        userId: null,
        eventType: 'error',
        mode: null,
        phase: null,
        artifactsUsed: [],
        usedVectorSearch: false,
        promptVersion: '2.0',
        modelUsed: process.env.VERTEX_AI_MODEL || 'gemini-3-pro-preview',
        success: false,
        errorMessage: error instanceof Error ? error.message : String(error),
      }).catch((err) => console.error('[ai/chat] Failed to log error:', err));
    }
    return NextResponse.json(
      {
        error: 'Failed to process chat message',
        details: error instanceof Error ? error.message : String(error),
      },
      { status: 500 },
    );
  }
}

View File

@@ -0,0 +1,37 @@
import { NextResponse } from 'next/server';
import { getAdminDb } from '@/lib/firebase/admin';
export async function POST(request: Request) {
try {
const url = new URL(request.url);
const body = await request
.json()
.catch(() => ({ projectId: url.searchParams.get('projectId') }));
const projectId = (body?.projectId ?? url.searchParams.get('projectId') ?? '').trim();
if (!projectId) {
return NextResponse.json(
{ error: 'projectId is required' },
{ status: 400 },
);
}
const adminDb = getAdminDb();
const docRef = adminDb.collection('chat_conversations').doc(projectId);
await docRef.delete();
return NextResponse.json({ success: true });
} catch (error) {
console.error('[ai/conversation/reset] Failed to reset conversation', error);
return NextResponse.json(
{
error: 'Failed to reset conversation',
details: error instanceof Error ? error.message : String(error),
},
{ status: 500 },
);
}
}

View File

@@ -0,0 +1,54 @@
import { NextResponse } from 'next/server';
import { getAdminDb } from '@/lib/firebase/admin';
// Roles stored in the conversation document.
type StoredMessageRole = 'user' | 'assistant';
// A single persisted chat message. The chat route writes `createdAt` as an
// ISO-8601 string, but older documents may hold a serialized Firestore
// Timestamp, so both representations are accepted here.
type ConversationMessage = {
  role: StoredMessageRole;
  content: string;
  createdAt?: string | { _seconds: number; _nanoseconds: number };
};
// Response payload for GET: the full ordered message list (possibly empty).
type ConversationResponse = {
  messages: ConversationMessage[];
};
export async function GET(request: Request) {
try {
const url = new URL(request.url);
const projectId = (url.searchParams.get('projectId') ?? '').trim();
if (!projectId) {
return NextResponse.json(
{ error: 'projectId is required' },
{ status: 400 },
);
}
const adminDb = getAdminDb();
const docRef = adminDb.collection('chat_conversations').doc(projectId);
const snapshot = await docRef.get();
if (!snapshot.exists) {
const empty: ConversationResponse = { messages: [] };
return NextResponse.json(empty);
}
const data = snapshot.data() as { messages?: ConversationMessage[] };
const messages = Array.isArray(data.messages) ? data.messages : [];
const response: ConversationResponse = { messages };
return NextResponse.json(response);
} catch (error) {
console.error('[ai/conversation] Failed to load conversation', error);
return NextResponse.json(
{
error: 'Failed to load conversation',
details: error instanceof Error ? error.message : String(error),
},
{ status: 500 },
);
}
}