VIBN Frontend for Coolify deployment

This commit is contained in:
2026-02-15 19:25:52 -08:00
commit 40bf8428cd
398 changed files with 76513 additions and 0 deletions

View File

@@ -0,0 +1,180 @@
import { z } from 'zod';
const evidenceArray = z.array(z.string()).default([]);
const confidenceValue = z.number().min(0).max(1).default(0);
const completionScore = z.number().min(0).max(1).default(0);
const defaultWeightedString = {
description: null as string | null,
confidence: 0,
evidence: [] as string[],
};
const weightedStringField = z
.object({
description: z.union([z.string(), z.null()]).default(null),
confidence: confidenceValue.default(0),
evidence: evidenceArray.default([]),
})
.default(defaultWeightedString);
const weightedListItem = z.object({
id: z.string(),
description: z.string(),
confidence: confidenceValue,
evidence: evidenceArray,
});
const stageEnum = z.enum([
'idea',
'prototype',
'mvp_in_progress',
'live_beta',
'live_paid',
'unknown',
]);
const severityEnum = z.enum(['low', 'medium', 'high', 'unknown']);
const frequencyEnum = z.enum(['rare', 'occasional', 'frequent', 'constant', 'unknown']);
const competitorTypeEnum = z.enum(['direct', 'indirect', 'alternative', 'unknown']);
const relatedAreaEnum = z.enum(['product', 'tech', 'market', 'business_model', 'other']);
const priorityEnum = z.enum(['high', 'medium', 'low']);
export const ChatExtractionSchema = z.object({
project_summary: z.object({
working_title: z.union([z.string(), z.null()]).default(null),
one_liner: z.union([z.string(), z.null()]).default(null),
stage: stageEnum.default('unknown'),
overall_confidence: confidenceValue,
evidence: evidenceArray,
}),
product_vision: z.object({
problem_statement: weightedStringField,
target_outcome: weightedStringField,
founder_intent: weightedStringField,
completion_score: completionScore,
}),
target_users: z.object({
primary_segment: weightedStringField,
segments: z
.array(
z.object({
id: z.string(),
description: z.string(),
jobs_to_be_done: z.array(z.string()).default([]),
environment: z.union([z.string(), z.null()]),
confidence: confidenceValue,
evidence: evidenceArray,
}),
)
.default([]),
completion_score: completionScore,
}),
problems_and_pains: z.object({
problems: z
.array(
z.object({
id: z.string(),
description: z.string(),
severity: severityEnum,
frequency: frequencyEnum,
confidence: confidenceValue,
evidence: evidenceArray,
}),
)
.default([]),
completion_score: completionScore,
}),
solution_and_features: z.object({
core_solution: weightedStringField,
core_features: z
.array(
z.object({
id: z.string(),
name: z.string(),
description: z.string(),
is_must_have_for_v1: z.boolean(),
confidence: confidenceValue,
evidence: evidenceArray,
}),
)
.default([]),
nice_to_have_features: z
.array(
z.object({
id: z.string(),
name: z.string(),
description: z.string(),
confidence: confidenceValue,
evidence: evidenceArray,
}),
)
.default([]),
completion_score: completionScore,
}),
market_and_competition: z.object({
market_category: weightedStringField,
competitors: z
.array(
z.object({
id: z.string(),
name: z.string(),
description: z.string(),
type: competitorTypeEnum,
confidence: confidenceValue,
evidence: evidenceArray,
}),
)
.default([]),
differentiation_points: weightedListItem.array().default([]),
completion_score: completionScore,
}),
tech_and_constraints: z.object({
stack_mentions: weightedListItem.array().default([]),
constraints: weightedListItem.array().default([]),
completion_score: completionScore,
}),
execution_status: z.object({
current_stage: weightedStringField,
work_done: weightedListItem.array().default([]),
work_in_progress: weightedListItem.array().default([]),
blocked_items: weightedListItem.array().default([]),
completion_score: completionScore,
}),
goals_and_success: z.object({
short_term_goals: weightedListItem.array().default([]),
long_term_goals: weightedListItem.array().default([]),
success_criteria: weightedListItem.array().default([]),
completion_score: completionScore,
}),
unknowns_and_questions: z.object({
unknowns: z
.array(
z.object({
id: z.string(),
description: z.string(),
related_area: relatedAreaEnum,
evidence: evidenceArray,
confidence: confidenceValue,
}),
)
.default([]),
questions_to_ask_user: z
.array(
z.object({
id: z.string(),
question: z.string(),
priority: priorityEnum,
}),
)
.default([]),
}),
summary_scores: z.object({
overall_completion: completionScore,
overall_confidence: confidenceValue,
}),
});
export type ChatExtractionData = z.infer<typeof ChatExtractionSchema>;

42
lib/ai/chat-extractor.ts Normal file
View File

@@ -0,0 +1,42 @@
import type { LlmClient } from '@/lib/ai/llm-client';
import { ChatExtractionSchema } from '@/lib/ai/chat-extraction-types';
import type { ChatExtractionData } from '@/lib/ai/chat-extraction-types';
import type { KnowledgeItem } from '@/lib/types/knowledge';
const SYSTEM_PROMPT = `
You are the Product Chat Signal Extractor for stalled SaaS projects.
- Read the provided transcript carefully.
- Extract grounded signals about the product, market, users, execution status, and unknowns.
- Never invent data. Use "null" or empty arrays when the transcript lacks information.
- Respond with valid JSON that matches the provided schema exactly. Do not include prose or code fences.
`.trim();
export async function runChatExtraction(
knowledgeItem: KnowledgeItem,
llm: LlmClient,
): Promise<ChatExtractionData> {
const transcript = knowledgeItem.content.trim();
const userMessage = `
You will analyze the following transcript. Use message references when listing evidence (e.g., msg_1).
Focus on actionable product-building insights.
TRANSCRIPT_START
${transcript}
TRANSCRIPT_END`.trim();
return llm.structuredCall<ChatExtractionData>({
model: 'gemini',
systemPrompt: SYSTEM_PROMPT,
messages: [
{
role: 'user',
content: userMessage,
},
],
schema: ChatExtractionSchema,
temperature: 0.2,
});
}

38
lib/ai/chat-modes.ts Normal file
View File

@@ -0,0 +1,38 @@
/**
* Chat Modes and System Prompts
*
* Defines available chat modes and maps them to their system prompts.
* Prompts are now versioned and managed in separate files under lib/ai/prompts/
*/
import {
collectorPrompt,
extractionReviewPrompt,
visionPrompt,
mvpPrompt,
marketingPrompt,
generalChatPrompt,
} from './prompts';
export type ChatMode =
| "collector_mode"
| "extraction_review_mode"
| "vision_mode"
| "mvp_mode"
| "marketing_mode"
| "general_chat_mode";
/**
* Maps each chat mode to its current active system prompt.
*
* Prompts are version-controlled in separate files.
* To update a prompt or switch versions, edit the corresponding file in lib/ai/prompts/
*/
export const MODE_SYSTEM_PROMPTS: Record<ChatMode, string> = {
collector_mode: collectorPrompt,
extraction_review_mode: extractionReviewPrompt,
vision_mode: visionPrompt,
mvp_mode: mvpPrompt,
marketing_mode: marketingPrompt,
general_chat_mode: generalChatPrompt,
};

297
lib/ai/chunking.ts Normal file
View File

@@ -0,0 +1,297 @@
/**
* Text chunking for semantic search
*
* Splits large documents into smaller, semantically coherent chunks
* suitable for vector embedding and retrieval.
*/
export interface TextChunk {
/** Index of this chunk (0-based) */
index: number;
/** The chunked text content */
text: string;
/** Approximate token count (for reference) */
estimatedTokens: number;
}
export interface ChunkingOptions {
/** Target maximum tokens per chunk (approximate) */
maxTokens?: number;
/** Target maximum characters per chunk (fallback if no tokenizer) */
maxChars?: number;
/** Overlap between chunks (in characters) */
overlapChars?: number;
/** Whether to try preserving paragraph boundaries */
preserveParagraphs?: boolean;
}
const DEFAULT_OPTIONS: Required<ChunkingOptions> = {
maxTokens: 800,
maxChars: 3000, // Rough approximation: ~4 chars per token
overlapChars: 200,
preserveParagraphs: true,
};
/**
* Estimate token count from character count
*
* Uses a rough heuristic: 1 token ≈ 4 characters for English text.
* For more accuracy, integrate a real tokenizer (e.g., tiktoken).
*/
function estimateTokens(text: string): number {
return Math.ceil(text.length / 4);
}
/**
* Split text into paragraphs, preserving empty lines as separators
*/
function splitIntoParagraphs(text: string): string[] {
return text.split(/\n\n+/).filter((p) => p.trim().length > 0);
}
/**
* Split text into sentences (simple heuristic)
*/
function splitIntoSentences(text: string): string[] {
// Simple sentence boundary detection
return text
.split(/[.!?]+\s+/)
.map((s) => s.trim())
.filter((s) => s.length > 0);
}
/**
* Chunk text into semantic pieces suitable for embedding
*
* Strategy:
* 1. Split by paragraphs (if preserveParagraphs = true)
* 2. Group paragraphs/sentences until reaching maxTokens/maxChars
* 3. Add overlap between chunks for context continuity
*
* @param content - Text to chunk
* @param options - Chunking options
* @returns Array of text chunks with metadata
*
* @example
* ```typescript
* const chunks = chunkText(longDocument, { maxTokens: 500, overlapChars: 100 });
* for (const chunk of chunks) {
* console.log(`Chunk ${chunk.index}: ${chunk.estimatedTokens} tokens`);
* await embedText(chunk.text);
* }
* ```
*/
export function chunkText(
content: string,
options: ChunkingOptions = {}
): TextChunk[] {
const opts = { ...DEFAULT_OPTIONS, ...options };
const chunks: TextChunk[] = [];
if (!content || content.trim().length === 0) {
return chunks;
}
// Clean up content
const cleanedContent = content.trim();
// If content is small enough, return as single chunk
if (estimateTokens(cleanedContent) <= opts.maxTokens) {
return [
{
index: 0,
text: cleanedContent,
estimatedTokens: estimateTokens(cleanedContent),
},
];
}
// Split into paragraphs or sentences
const units = opts.preserveParagraphs
? splitIntoParagraphs(cleanedContent)
: splitIntoSentences(cleanedContent);
if (units.length === 0) {
return [
{
index: 0,
text: cleanedContent,
estimatedTokens: estimateTokens(cleanedContent),
},
];
}
let currentChunk = '';
let chunkIndex = 0;
let previousOverlap = '';
for (let i = 0; i < units.length; i++) {
const unit = units[i];
const potentialChunk = currentChunk
? `${currentChunk}\n\n${unit}`
: `${previousOverlap}${unit}`;
const potentialTokens = estimateTokens(potentialChunk);
const potentialChars = potentialChunk.length;
// Check if adding this unit would exceed limits
if (
potentialTokens > opts.maxTokens ||
potentialChars > opts.maxChars
) {
// Save current chunk if it has content
if (currentChunk.length > 0) {
chunks.push({
index: chunkIndex++,
text: currentChunk,
estimatedTokens: estimateTokens(currentChunk),
});
// Prepare overlap for next chunk
const overlapStart = Math.max(
0,
currentChunk.length - opts.overlapChars
);
previousOverlap = currentChunk.substring(overlapStart);
if (previousOverlap.length > 0 && !previousOverlap.endsWith(' ')) {
// Try to start overlap at a word boundary
const spaceIndex = previousOverlap.indexOf(' ');
if (spaceIndex > 0) {
previousOverlap = previousOverlap.substring(spaceIndex + 1);
}
}
}
// Start new chunk with current unit
currentChunk = `${previousOverlap}${unit}`;
} else {
// Add unit to current chunk
currentChunk = potentialChunk;
}
}
// Add final chunk if it has content
if (currentChunk.length > 0) {
chunks.push({
index: chunkIndex++,
text: currentChunk,
estimatedTokens: estimateTokens(currentChunk),
});
}
console.log(
`[Chunking] Split ${cleanedContent.length} chars into ${chunks.length} chunks`
);
return chunks;
}
/**
* Chunk text with code-aware splitting
*
* Preserves code blocks and tries to keep them intact.
* Useful for chunking AI chat transcripts that contain code snippets.
*/
export function chunkTextWithCodeAwareness(
content: string,
options: ChunkingOptions = {}
): TextChunk[] {
const opts = { ...DEFAULT_OPTIONS, ...options };
// Detect code blocks (triple backticks)
const codeBlockRegex = /```[\s\S]*?```/g;
const codeBlocks: { start: number; end: number; content: string }[] = [];
let match;
while ((match = codeBlockRegex.exec(content)) !== null) {
codeBlocks.push({
start: match.index,
end: match.index + match[0].length,
content: match[0],
});
}
// If no code blocks, use standard chunking
if (codeBlocks.length === 0) {
return chunkText(content, options);
}
// Split content around code blocks
const chunks: TextChunk[] = [];
let chunkIndex = 0;
let currentPosition = 0;
for (const codeBlock of codeBlocks) {
// Chunk text before code block
const textBefore = content.substring(currentPosition, codeBlock.start);
if (textBefore.trim().length > 0) {
const textChunks = chunkText(textBefore, opts);
for (const chunk of textChunks) {
chunks.push({
...chunk,
index: chunkIndex++,
});
}
}
// Add code block as its own chunk (or split if too large)
const codeTokens = estimateTokens(codeBlock.content);
if (codeTokens <= opts.maxTokens) {
chunks.push({
index: chunkIndex++,
text: codeBlock.content,
estimatedTokens: codeTokens,
});
} else {
// Code block is too large, split by lines
const codeLines = codeBlock.content.split('\n');
let currentCodeChunk = '';
for (const line of codeLines) {
const potentialChunk = currentCodeChunk
? `${currentCodeChunk}\n${line}`
: line;
if (estimateTokens(potentialChunk) > opts.maxTokens) {
if (currentCodeChunk.length > 0) {
chunks.push({
index: chunkIndex++,
text: currentCodeChunk,
estimatedTokens: estimateTokens(currentCodeChunk),
});
}
currentCodeChunk = line;
} else {
currentCodeChunk = potentialChunk;
}
}
if (currentCodeChunk.length > 0) {
chunks.push({
index: chunkIndex++,
text: currentCodeChunk,
estimatedTokens: estimateTokens(currentCodeChunk),
});
}
}
currentPosition = codeBlock.end;
}
// Chunk remaining text after last code block
const textAfter = content.substring(currentPosition);
if (textAfter.trim().length > 0) {
const textChunks = chunkText(textAfter, opts);
for (const chunk of textChunks) {
chunks.push({
...chunk,
index: chunkIndex++,
});
}
}
return chunks;
}

173
lib/ai/embeddings.ts Normal file
View File

@@ -0,0 +1,173 @@
/**
* Embedding generation using Gemini API
*
* Converts text into vector embeddings for semantic search.
*/
import { GoogleGenerativeAI } from '@google/generative-ai';
const GEMINI_API_KEY = process.env.GEMINI_API_KEY;
if (!GEMINI_API_KEY) {
console.warn('[Embeddings] GEMINI_API_KEY not set - embedding functions will fail');
}
const genAI = GEMINI_API_KEY ? new GoogleGenerativeAI(GEMINI_API_KEY) : null;
// Gemini embedding model - text-embedding-004 produces 768-dim embeddings
// Adjust EMBEDDING_DIMENSION in knowledge-chunks-schema.sql if using different model
const EMBEDDING_MODEL = 'text-embedding-004';
const EMBEDDING_DIMENSION = 768;
/**
* Generate embedding for a single text string
*
* @param text - Input text to embed
* @returns Vector embedding as array of numbers
*
* @throws Error if Gemini API is not configured or request fails
*/
export async function embedText(text: string): Promise<number[]> {
if (!genAI) {
throw new Error('GEMINI_API_KEY not configured - cannot generate embeddings');
}
if (!text || text.trim().length === 0) {
throw new Error('Cannot embed empty text');
}
try {
const model = genAI.getGenerativeModel({ model: EMBEDDING_MODEL });
const result = await model.embedContent(text);
const embedding = result.embedding;
if (!embedding || !embedding.values || embedding.values.length === 0) {
throw new Error('Gemini returned empty embedding');
}
// Verify dimension matches expectation
if (embedding.values.length !== EMBEDDING_DIMENSION) {
console.warn(
`[Embeddings] Unexpected dimension: got ${embedding.values.length}, expected ${EMBEDDING_DIMENSION}`
);
}
return embedding.values;
} catch (error) {
console.error('[Embeddings] Failed to embed text:', error);
throw new Error(
`Embedding generation failed: ${error instanceof Error ? error.message : String(error)}`
);
}
}
/**
* Generate embeddings for multiple texts in batch
*
* More efficient than calling embedText() repeatedly.
* Processes texts sequentially to avoid rate limiting.
*
* @param texts - Array of texts to embed
* @param options - Batch processing options
* @returns Array of embeddings (same order as input texts)
*
* @example
* ```typescript
* const chunks = ["First chunk...", "Second chunk...", "Third chunk..."];
* const embeddings = await embedTextBatch(chunks);
* // embeddings[0] corresponds to chunks[0], etc.
* ```
*/
export async function embedTextBatch(
texts: string[],
options: { delayMs?: number; skipEmpty?: boolean } = {}
): Promise<number[][]> {
const { delayMs = 100, skipEmpty = true } = options;
if (texts.length === 0) {
return [];
}
const embeddings: number[][] = [];
for (let i = 0; i < texts.length; i++) {
const text = texts[i];
// Skip empty texts if requested
if (skipEmpty && (!text || text.trim().length === 0)) {
console.warn(`[Embeddings] Skipping empty text at index ${i}`);
embeddings.push(new Array(EMBEDDING_DIMENSION).fill(0)); // Zero vector for empty
continue;
}
try {
const embedding = await embedText(text);
embeddings.push(embedding);
// Add delay between requests to avoid rate limiting (except for last item)
if (i < texts.length - 1 && delayMs > 0) {
await new Promise((resolve) => setTimeout(resolve, delayMs));
}
} catch (error) {
console.error(`[Embeddings] Failed to embed text at index ${i}:`, error);
// Push zero vector as fallback
embeddings.push(new Array(EMBEDDING_DIMENSION).fill(0));
}
}
console.log(`[Embeddings] Generated ${embeddings.length} embeddings`);
return embeddings;
}
/**
* Compute cosine similarity between two embeddings
*
* @param a - First embedding vector
* @param b - Second embedding vector
* @returns Cosine similarity score (0-1, higher = more similar)
*/
export function cosineSimilarity(a: number[], b: number[]): number {
if (a.length !== b.length) {
throw new Error('Embedding dimensions do not match');
}
let dotProduct = 0;
let normA = 0;
let normB = 0;
for (let i = 0; i < a.length; i++) {
dotProduct += a[i] * b[i];
normA += a[i] * a[i];
normB += b[i] * b[i];
}
const magnitude = Math.sqrt(normA) * Math.sqrt(normB);
if (magnitude === 0) {
return 0;
}
return dotProduct / magnitude;
}
/**
* Get the expected embedding dimension for the current model
*/
export function getEmbeddingDimension(): number {
return EMBEDDING_DIMENSION;
}
/**
* Check if embeddings API is configured and working
*/
export async function checkEmbeddingsHealth(): Promise<boolean> {
try {
const testEmbedding = await embedText('health check');
return testEmbedding.length === EMBEDDING_DIMENSION;
} catch (error) {
console.error('[Embeddings Health Check] Failed:', error);
return false;
}
}

346
lib/ai/gemini-client.ts Normal file
View File

@@ -0,0 +1,346 @@
import { GoogleGenAI } from '@google/genai';
import { GoogleAuth } from 'google-auth-library';
import type { LlmClient, StructuredCallArgs } from '@/lib/ai/llm-client';
import { zodToJsonSchema } from 'zod-to-json-schema';
const VERTEX_PROJECT_ID = process.env.VERTEX_AI_PROJECT_ID || 'gen-lang-client-0980079410';
const VERTEX_LOCATION = process.env.VERTEX_AI_LOCATION || 'global';
const DEFAULT_MODEL = process.env.VERTEX_AI_MODEL || 'gemini-2.0-flash-exp'; // Fast model for collector mode
// Helper to set up Google Application Credentials
function setupGoogleCredentials() {
console.log('[Gemini Client] setupGoogleCredentials called');
console.log('[Gemini Client] FIREBASE_CLIENT_EMAIL:', process.env.FIREBASE_CLIENT_EMAIL ? 'SET' : 'NOT SET');
console.log('[Gemini Client] FIREBASE_PRIVATE_KEY:', process.env.FIREBASE_PRIVATE_KEY ? 'SET' : 'NOT SET');
console.log('[Gemini Client] GOOGLE_APPLICATION_CREDENTIALS before:', process.env.GOOGLE_APPLICATION_CREDENTIALS || 'NOT SET');
// Only set up if we have Firebase credentials and Google creds aren't already set
if (process.env.FIREBASE_CLIENT_EMAIL && process.env.FIREBASE_PRIVATE_KEY && !process.env.GOOGLE_APPLICATION_CREDENTIALS) {
const credentials = {
type: 'service_account',
project_id: VERTEX_PROJECT_ID,
private_key_id: 'firebase-key',
private_key: process.env.FIREBASE_PRIVATE_KEY.replace(/\\n/g, '\n'),
client_email: process.env.FIREBASE_CLIENT_EMAIL,
client_id: '',
auth_uri: 'https://accounts.google.com/o/oauth2/auth',
token_uri: 'https://oauth2.googleapis.com/token',
auth_provider_x509_cert_url: 'https://www.googleapis.com/oauth2/v1/certs',
client_x509_cert_url: `https://www.googleapis.com/robot/v1/metadata/x509/${encodeURIComponent(process.env.FIREBASE_CLIENT_EMAIL)}`,
universe_domain: 'googleapis.com',
};
// Write credentials to a temp file that Google Auth Library can read
const fs = require('fs');
const os = require('os');
const path = require('path');
const tmpDir = os.tmpdir();
const credPath = path.join(tmpDir, 'google-credentials.json');
try {
fs.writeFileSync(credPath, JSON.stringify(credentials));
process.env.GOOGLE_APPLICATION_CREDENTIALS = credPath;
console.log('[Gemini Client] ✅ Created credentials file at:', credPath);
return true;
} catch (error) {
console.error('[Gemini Client] ❌ Failed to write credentials file:', error);
return false;
}
} else {
console.log('[Gemini Client] Skipping credentials setup - already set or missing Firebase creds');
}
return false;
}
class JsonValidationError extends Error {
constructor(message: string, public readonly rawResponse: string) {
super(message);
}
}
function extractJsonPayload(raw: string): string {
const trimmed = raw.trim();
if (trimmed.startsWith('```')) {
const withoutFence = trimmed.replace(/^```(?:json)?/i, '').replace(/```$/, '');
return withoutFence.trim();
}
return trimmed;
}
async function parseResponse<TOutput>(
rawResponse: any,
schema: StructuredCallArgs<TOutput>['schema'],
): Promise<TOutput> {
// Extract text from Google GenAI response
// The response structure is: { candidates: [{ content: { parts: [{ text: "..." }] } }] }
let text = '';
// Check for truncation
const finishReason = rawResponse?.candidates?.[0]?.finishReason;
if (finishReason && finishReason !== 'STOP') {
console.warn(`[Gemini Client] WARNING: Response may be incomplete. finishReason: ${finishReason}`);
}
if (rawResponse?.candidates?.[0]?.content?.parts?.[0]?.text) {
text = rawResponse.candidates[0].content.parts[0].text;
} else if (rawResponse?.text) {
text = rawResponse.text;
} else if (typeof rawResponse === 'string') {
text = rawResponse;
}
// Check if we got HTML instead of JSON (API error)
if (text.trim().startsWith('<!DOCTYPE') || text.trim().startsWith('<html')) {
console.error('[Gemini Client] Received HTML instead of JSON. This usually means an API authentication or permission error.');
console.error('[Gemini Client] Response preview:', text.substring(0, 500));
throw new Error('Gemini API returned HTML instead of JSON. Check API permissions and authentication. See server logs for details.');
}
if (!text) {
console.error('[Gemini Client] Empty response from API');
console.error('[Gemini Client] Raw response:', JSON.stringify(rawResponse, null, 2)?.substring(0, 500));
throw new Error('Empty response from Gemini API');
}
// Debug: Log what we received
console.log('[Gemini Client] Received text:', text.substring(0, 300));
const cleaned = extractJsonPayload(text);
let parsed: unknown;
try {
parsed = JSON.parse(cleaned);
} catch (error) {
console.error('[Gemini Client] Failed to parse response as JSON');
console.error('[Gemini Client] Raw text:', text.substring(0, 500));
throw new JsonValidationError(
`Failed to parse JSON response: ${(error as Error).message}`,
text,
);
}
// Debug: Log what we parsed
console.log('[Gemini Client] Parsed JSON:', JSON.stringify(parsed, null, 2).substring(0, 300));
const validation = schema.safeParse(parsed);
if (!validation.success) {
console.error('[Gemini Client] Schema validation failed!');
console.error('[Gemini Client] Received JSON had these keys:', Object.keys(parsed as any));
console.error('[Gemini Client] Validation errors:', validation.error.errors);
throw new JsonValidationError(validation.error.message, text);
}
return validation.data;
}
export class GeminiLlmClient implements LlmClient {
private readonly genAI: GoogleGenAI;
private readonly model: string;
private readonly location: string;
private readonly projectId: string;
constructor() {
// Google GenAI SDK with Vertex AI support
this.projectId = VERTEX_PROJECT_ID;
this.location = VERTEX_LOCATION;
this.model = DEFAULT_MODEL;
// Set up Google Application Credentials BEFORE initializing the SDK
setupGoogleCredentials();
// Debug: Check environment variables
console.log('[Gemini Client] Environment check:');
console.log(' VERTEX_AI_PROJECT_ID:', process.env.VERTEX_AI_PROJECT_ID);
console.log(' VERTEX_AI_LOCATION:', process.env.VERTEX_AI_LOCATION);
console.log(' VERTEX_AI_MODEL:', process.env.VERTEX_AI_MODEL);
console.log(' GOOGLE_APPLICATION_CREDENTIALS:', process.env.GOOGLE_APPLICATION_CREDENTIALS ? 'SET' : 'NOT SET');
console.log(' FIREBASE_CLIENT_EMAIL:', process.env.FIREBASE_CLIENT_EMAIL ? 'SET' : 'NOT SET');
// Initialize with Vertex AI configuration
// The SDK will automatically use GOOGLE_APPLICATION_CREDENTIALS if set
this.genAI = new GoogleGenAI({
project: this.projectId,
location: this.location,
vertexai: true, // Enable Vertex AI mode
});
console.log(`[Gemini Client] Initialized with model: ${this.model}, location: ${this.location}`);
}
async structuredCall<TOutput>(
args: StructuredCallArgs<TOutput>,
): Promise<TOutput> {
if (args.model !== 'gemini') {
throw new Error(`GeminiLlmClient only supports model "gemini" (received ${args.model})`);
}
// Convert Zod schema to JSON Schema for Gemini
const rawJsonSchema = zodToJsonSchema(args.schema, 'responseSchema');
// Extract the actual schema from the definitions (zodToJsonSchema wraps it in $ref)
let actualSchema = rawJsonSchema;
const rawSchema = rawJsonSchema as any; // Type assertion for $ref access
if (rawSchema.$ref && rawSchema.definitions) {
const refName = rawSchema.$ref.replace('#/definitions/', '');
actualSchema = rawSchema.definitions[refName];
}
// Convert to Google's expected format (UPPERCASE types)
const convertToGoogleSchema = (schema: any): any => {
if (!schema || typeof schema !== 'object') return schema;
const converted: any = {};
if (schema.type) {
converted.type = schema.type.toUpperCase();
}
if (schema.properties) {
converted.properties = {};
for (const [key, value] of Object.entries(schema.properties)) {
converted.properties[key] = convertToGoogleSchema(value);
}
}
if (schema.items) {
converted.items = convertToGoogleSchema(schema.items);
}
if (schema.required) {
converted.required = schema.required;
}
if (schema.description) {
converted.description = schema.description;
}
if (schema.enum) {
converted.enum = schema.enum;
}
// Remove additionalProperties since Gemini doesn't use it
// (it's a JSON Schema Draft 7 thing)
return converted;
};
const googleSchema = convertToGoogleSchema(actualSchema);
// Debug: Log the schema being sent
console.log('[Gemini Client] Sending schema:', JSON.stringify(googleSchema, null, 2));
// Build generation config matching Google's example structure
const generationConfig: any = {
temperature: args.temperature ?? 1.0,
responseMimeType: 'application/json',
responseSchema: googleSchema,
maxOutputTokens: 32768, // Gemini 3 Pro supports up to 32k output tokens
};
// Main request object for REST API (flat structure)
const config: any = {
contents: [], // Will be populated below
generationConfig: generationConfig,
};
// Add system instruction if provided
if (args.systemPrompt) {
// Create a minimal example showing the exact format
const exampleJson: any = {};
for (const [key, prop] of Object.entries(googleSchema.properties || {})) {
if (key === 'reply') {
exampleJson[key] = 'Your response here';
} else {
exampleJson[key] = null; // optional field
}
}
config.systemInstruction = {
parts: [{
text: `${args.systemPrompt}\n\nIMPERATIVE: Respond ONLY with this exact JSON format:\n${JSON.stringify(exampleJson)}\n\nDo NOT add thought_process, response, or any other fields. Use only the keys shown above.`
}],
};
}
// Add thinking config if provided (for Gemini 3 Pro Preview)
if (args.thinking_config) {
config.generationConfig.thinkingConfig = {
thinkingLevel: args.thinking_config.thinking_level?.toUpperCase() || 'HIGH',
includeThoughts: args.thinking_config.include_thoughts || false,
};
}
// Convert messages to Google GenAI format
config.contents = args.messages.map((message) => ({
role: message.role === 'assistant' ? 'model' : 'user',
parts: [{ text: message.content }],
}));
const run = async () => {
try {
console.log('[Gemini Client] Calling generateContent via REST API...');
// Use direct REST API call instead of SDK (SDK has auth issues)
const { GoogleAuth } = require('google-auth-library');
const auth = new GoogleAuth({
scopes: ['https://www.googleapis.com/auth/cloud-platform'],
});
const client = await auth.getClient();
const accessToken = await client.getAccessToken();
const url = `https://aiplatform.googleapis.com/v1/projects/${this.projectId}/locations/${this.location}/publishers/google/models/${this.model}:generateContent`;
console.log('[Gemini Client] Making request to:', url);
const response = await fetch(url, {
method: 'POST',
headers: {
'Authorization': `Bearer ${accessToken.token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify(config),
signal: AbortSignal.timeout(180000), // 3 minute timeout
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Vertex AI API error: ${response.status} ${errorText}`);
}
const result = await response.json();
console.log('[Gemini Client] Got response from Gemini');
return parseResponse(result, args.schema);
} catch (error: any) {
console.error('[Gemini Client] API call failed:', error.message || error);
throw error;
}
};
try {
return await run();
} catch (error) {
if (!(error instanceof JsonValidationError)) {
throw error;
}
// Retry with error message
config.contents = [
...config.contents,
{
role: 'user' as const,
parts: [
{
text: `Your previous response was not valid JSON. Error: ${error.message}\n` +
'Respond again with ONLY valid JSON that strictly matches the requested schema. Do not include comments or code fences.',
},
],
},
];
return run();
}
}
}

43
lib/ai/llm-client.ts Normal file
View File

@@ -0,0 +1,43 @@
import type { ZodType, ZodTypeDef } from 'zod';
export type LlmModel = 'gemini' | 'gpt' | 'sonnet';
export interface LlmMessage {
role: 'system' | 'user' | 'assistant';
content: string;
}
export interface ThinkingConfig {
/**
* Thinking level for Gemini 3 models
* - 'low': Minimizes latency and cost (for simple tasks)
* - 'high': Maximizes reasoning depth (for complex tasks, default)
*/
thinking_level?: 'low' | 'high';
/**
* Whether to include thoughts in the response
* Useful for debugging/understanding model reasoning
*/
include_thoughts?: boolean;
}
export interface StructuredCallArgs<TOutput> {
model: LlmModel;
systemPrompt: string;
messages: LlmMessage[];
schema: ZodType<TOutput, ZodTypeDef, any>;
temperature?: number;
/**
* Gemini 3 thinking configuration
* Enables internal reasoning before responding
*/
thinking_config?: ThinkingConfig;
}
export interface LlmClient {
structuredCall<TOutput>(args: StructuredCallArgs<TOutput>): Promise<TOutput>;
}

70
lib/ai/marketing-agent.ts Normal file
View File

@@ -0,0 +1,70 @@
import { z } from 'zod';
import type { LlmClient } from '@/lib/ai/llm-client';
import { GeminiLlmClient } from '@/lib/ai/gemini-client';
import { clamp, nowIso, loadPhaseContainers, persistPhaseArtifacts } from '@/lib/server/projects';
import type { MarketingModel } from '@/lib/types/marketing';
const HomepageMessagingSchema = z.object({
headline: z.string().nullable(),
subheadline: z.string().nullable(),
bullets: z.array(z.string()).default([]),
});
const MarketingModelSchema = z.object({
projectId: z.string(),
icp: z.array(z.string()).default([]),
positioning: z.string().nullable(),
homepageMessaging: HomepageMessagingSchema,
initialChannels: z.array(z.string()).default([]),
launchAngles: z.array(z.string()).default([]),
overallConfidence: z.number().min(0).max(1),
});
export async function runMarketingPlanning(
projectId: string,
llmClient?: LlmClient,
): Promise<MarketingModel> {
const { phaseData } = await loadPhaseContainers(projectId);
const canonical = phaseData.canonicalProductModel;
if (!canonical) {
throw new Error('Canonical product model missing. Run buildCanonicalProductModel first.');
}
const llm = llmClient ?? new GeminiLlmClient();
const systemPrompt =
'You are a SaaS marketing strategist. Given the canonical product model, produce ICP, positioning, homepage messaging, and launch ideas as strict JSON.';
const marketing = await llm.structuredCall<MarketingModel>({
model: 'gemini',
systemPrompt,
messages: [
{
role: 'user',
content: [
'Canonical product model JSON:',
'```json',
JSON.stringify(canonical, null, 2),
'```',
'Respond ONLY with valid JSON that matches the required schema.',
].join('\n'),
},
],
schema: MarketingModelSchema,
temperature: 0.2,
});
await persistPhaseArtifacts(projectId, (phaseData, phaseScores, phaseHistory) => {
phaseData.marketingPlan = marketing;
phaseScores.marketing = {
overallCompletion: clamp(marketing.homepageMessaging.bullets.length ? 0.7 : 0.5),
overallConfidence: marketing.overallConfidence,
updatedAt: nowIso(),
};
phaseHistory.push({ phase: 'marketing', status: 'completed', timestamp: nowIso() });
return { phaseData, phaseScores, phaseHistory, nextPhase: 'marketing_ready' };
});
return marketing;
}

62
lib/ai/mvp-agent.ts Normal file
View File

@@ -0,0 +1,62 @@
import { z } from 'zod';
import type { LlmClient } from '@/lib/ai/llm-client';
import { GeminiLlmClient } from '@/lib/ai/gemini-client';
import { clamp, nowIso, loadPhaseContainers, persistPhaseArtifacts } from '@/lib/server/projects';
import type { MvpPlan } from '@/lib/types/mvp';
const MvpPlanSchema = z.object({
projectId: z.string(),
coreFlows: z.array(z.string()).default([]),
coreFeatures: z.array(z.string()).default([]),
supportingFeatures: z.array(z.string()).default([]),
outOfScope: z.array(z.string()).default([]),
technicalTasks: z.array(z.string()).default([]),
blockers: z.array(z.string()).default([]),
overallConfidence: z.number().min(0).max(1),
});
export async function runMvpPlanning(projectId: string, llmClient?: LlmClient): Promise<MvpPlan> {
const { phaseData } = await loadPhaseContainers(projectId);
const canonical = phaseData.canonicalProductModel;
if (!canonical) {
throw new Error('Canonical product model missing. Run buildCanonicalProductModel first.');
}
const llm = llmClient ?? new GeminiLlmClient();
const systemPrompt =
'You are an expert SaaS product manager. Given the canonical product model, produce the smallest sellable MVP plan as strict JSON.';
const plan = await llm.structuredCall<MvpPlan>({
model: 'gemini',
systemPrompt,
messages: [
{
role: 'user',
content: [
'Canonical product model JSON:',
'```json',
JSON.stringify(canonical, null, 2),
'```',
'Respond ONLY with JSON that matches the required schema.',
].join('\n'),
},
],
schema: MvpPlanSchema,
temperature: 0.2,
});
await persistPhaseArtifacts(projectId, (phaseData, phaseScores, phaseHistory) => {
phaseData.mvpPlan = plan;
phaseScores.mvp = {
overallCompletion: clamp(plan.coreFeatures.length ? 0.8 : 0.5),
overallConfidence: plan.overallConfidence,
updatedAt: nowIso(),
};
phaseHistory.push({ phase: 'mvp', status: 'completed', timestamp: nowIso() });
return { phaseData, phaseScores, phaseHistory, nextPhase: 'mvp_ready' };
});
return plan;
}

176
lib/ai/prompts/README.md Normal file
View File

@@ -0,0 +1,176 @@
# Prompt Management System
This directory contains all versioned system prompts for Vibn's chat modes.
## 📁 Structure
```
prompts/
├── index.ts # Exports all prompts
├── shared.ts # Shared prompt components
├── collector.ts # Collector mode prompts
├── extraction-review.ts # Extraction review mode prompts
├── vision.ts # Vision mode prompts
├── mvp.ts # MVP mode prompts
├── marketing.ts # Marketing mode prompts
└── general-chat.ts # General chat mode prompts
```
## 🔄 Versioning
Each prompt file contains:
1. **Version history** - All versions of the prompt
2. **Metadata** - Version number, date, description
3. **Current version** - Which version is active
### Example Structure
```typescript
const COLLECTOR_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version',
prompt: `...`,
};
const COLLECTOR_V2: PromptVersion = {
version: 'v2',
createdAt: '2024-12-01',
description: 'Added context-aware chunking',
prompt: `...`,
};
export const collectorPrompts = {
v1: COLLECTOR_V1,
v2: COLLECTOR_V2,
current: 'v2', // ← Active version
};
```
## 📝 How to Add a New Prompt Version
1. **Open the relevant mode file** (e.g., `collector.ts`)
2. **Create a new version constant:**
```typescript
const COLLECTOR_V2: PromptVersion = {
version: 'v2',
createdAt: '2024-12-01',
description: 'What changed in this version',
prompt: `
Your new prompt text here...
`,
};
```
3. **Add to the prompts object:**
```typescript
export const collectorPrompts = {
v1: COLLECTOR_V1,
v2: COLLECTOR_V2, // Add new version
current: 'v2', // Update current
};
```
4. **Done!** The system will automatically use the new version.
## 🔙 How to Rollback
Simply change the `current` field:
```typescript
export const collectorPrompts = {
v1: COLLECTOR_V1,
v2: COLLECTOR_V2,
current: 'v1', // Rolled back to v1
};
```
## 📊 Benefits of This System
1. **Version History** - Keep all previous prompts for reference
2. **Easy Rollback** - Instantly revert to a previous version
3. **Git-Friendly** - Clear diffs show exactly what changed
4. **Documentation** - Each version has a description of changes
5. **A/B Testing Ready** - Can easily test multiple versions
6. **Isolated Changes** - Changing one prompt doesn't affect others
## 🎯 Usage in Code
```typescript
// Import current prompts (most common)
import { MODE_SYSTEM_PROMPTS } from '@/lib/ai/chat-modes';
const prompt = MODE_SYSTEM_PROMPTS['collector_mode'];
// Or access version history
import { collectorPrompts } from '@/lib/ai/prompts';
console.log(collectorPrompts.v1.prompt); // Old version
console.log(collectorPrompts.current); // 'v2'
```
## 🚀 Future Enhancements
### Analytics Tracking
Track performance by prompt version:
```typescript
await logPromptUsage({
mode: 'collector_mode',
version: collectorPrompts.current,
userId: user.id,
responseQuality: 0.85,
});
```
### A/B Testing
Test multiple versions simultaneously:
```typescript
const promptVersion = userInExperiment ? 'v2' : 'v1';
const prompt = collectorPrompts[promptVersion].prompt;
```
### Database Storage
Move to Firestore for dynamic updates:
```typescript
// Future: Load from database
const prompt = await getPrompt('collector_mode', 'latest');
```
## 📚 Best Practices
1. **Always add a description** - Future you will thank you
2. **Never delete old versions** - Keep history for rollback
3. **Test before deploying** - Ensure new prompts work as expected
4. **Document changes** - What problem does the new version solve?
5. **Version incrementally** - Don't skip version numbers
## 🔍 Example: Adding Context-Aware Chunking
```typescript
// 1. Create new version
const COLLECTOR_V2: PromptVersion = {
version: 'v2',
createdAt: '2024-11-17',
description: 'Added instructions for context-aware chunking',
prompt: `
${COLLECTOR_V1.prompt}
**Context-Aware Retrieval**:
When referencing retrieved chunks, always cite the source document
and chunk number for transparency.
`,
};
// 2. Update prompts object
export const collectorPrompts = {
v1: COLLECTOR_V1,
v2: COLLECTOR_V2,
current: 'v2',
};
// 3. Deploy and monitor
// If issues arise, simply change current: 'v1' to rollback
```
---
**Questions?** Check the code in any prompt file for examples.

318
lib/ai/prompts/collector.ts Normal file
View File

@@ -0,0 +1,318 @@
/**
* Collector Mode Prompt
*
* Purpose: Gathers project materials and triggers analysis
* Active when: No extractions exist yet
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
export interface PromptVersion {
version: string;
prompt: string;
createdAt: string;
description: string;
}
const COLLECTOR_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version with GitHub analysis and context-aware behavior',
prompt: `
You are Vibn, an AI copilot that helps indie devs and small teams rescue stalled SaaS projects.
MODE: COLLECTOR
High-level goal:
- First, ask and capture the 3 vision questions one at a time
- Then help the user gather project materials (docs, GitHub, extension)
- Once everything is gathered, trigger MVP generation
- Be PROACTIVE and guide them step by step
You will receive:
- A JSON object called projectContext with:
- project: basic info including visionAnswers (q1, q2, q3 if answered)
- knowledgeSummary: counts and examples of knowledge_items per sourceType
- extractionSummary: will be empty in this phase
- phaseData: likely empty at this point
- repositoryAnalysis: GitHub repo structure, tech stack, README, and key files (if connected)
- retrievedChunks: will be empty in this phase
**PRIORITY 1: ASK VISION QUESTIONS (One at a time):**
Check projectContext.project.visionAnswers to see what's been answered:
**Question 1** - If visionAnswers.q1 is missing:
Ask: "Let's start with your vision. **Who has the problem you want to fix and what is it?**"
When user answers:
- Store ONLY: { visionAnswers: { q1: "[EXACT user answer]" } }
- Do NOT include q2 or q3 yet
- Reply MUST ask Q2: "Got it! [reflection]. Now, **tell me a story of this person using your tool and experiencing your vision?**"
**Question 2** - If visionAnswers.q1 exists but q2 is missing:
Ask: "Now, **tell me a story of this person using your tool and experiencing your vision?**"
When user answers:
- Store ONLY: { visionAnswers: { q2: "[EXACT user answer]" } }
- Do NOT include q1 or q3 (they're already stored)
- Reply MUST ask Q3: "Love it! [reflection]. One more: **How much did that improve things for them?**"
**Question 3** - If visionAnswers.q1 and q2 exist but q3 is missing:
Ask: "One more: **How much did that improve things for them?**"
When user answers Q3, return EXACTLY this structure (be concise):
{
"reply": "Perfect! Let me generate your MVP plan now...",
"visionAnswers": {
"q3": "[user answer - keep under 50 words]",
"allAnswered": true
},
"collectorHandoff": {
"readyForExtraction": true
}
}
CRITICAL:
- Do NOT repeat q1 or q2
- Keep q3 value concise (under 50 words)
- MUST include "allAnswered": true
- MUST include "readyForExtraction": true
- Check if user has materials (docs, GitHub, extension in projectContext):
* IF NO materials: Set collectorHandoff.readyForExtraction = true
* IF materials exist: Set collectorHandoff.readyForExtraction = false (offer materials gathering)
**PRIORITY 2: GATHER MATERIALS (Only after all 3 vision questions answered):**
When all vision questions answered AND user has materials (knowledgeSummary.totalCount > 0 OR githubRepo OR extensionLinked), say:
"Welcome to Vibn! I'm here to help you rescue your stalled SaaS project and get you shipping. Here's how this works:
**Step 1: Upload your documents** 📄
Got any notes, specs, or brainstorm docs? Click the 'Context' tab to upload them.
**Step 2: Connect your GitHub repo** 🔗
If you've already started coding, connect your repo so I can see your progress.
**Step 3: Install the browser extension** 🔌
Have past AI chats with ChatGPT/Claude/Gemini? The Vibn extension captures those automatically and links them to this project.
Ready to start? What do you have for me first - documents, code, or AI chat history?"
**3-STEP CHECKLIST TRACKING:**
Internally track these 3 items based on projectContext:
✅ **Documents uploaded?**
- Check knowledgeSummary.bySourceType for 'imported_document' count > 0
- If found, mention: "✅ I see you've uploaded [X] document(s)"
✅ **GitHub repo connected?**
- Check if projectContext.project.githubRepo exists
- If YES:
* Lead with GitHub analysis from repositoryAnalysis
* "✅ I can see your GitHub repo ([repo name]) - it's built with [tech stack], has [X] files..."
* Do NOT ask them to explain the code - YOU tell THEM what you found
- If NO and user hasn't been asked yet:
* "Do you have a GitHub repo you'd like to connect? That way I can understand your technical progress."
✅ **Extension connected?**
- Check projectContext.project.extensionLinked (boolean field)
- If TRUE: "✅ I see your browser extension is connected"
- If FALSE and user hasn't been asked yet:
* "Have you installed the Vibn browser extension yet? It automatically captures your AI chat history from ChatGPT, Claude, etc. and links it to this project. Would you like to set that up?"
**BEHAVIOR RULES:**
1. Be PROACTIVE, not reactive - guide them through the 3 steps
2. ONE question at a time - don't overwhelm
3. If user shares content in the message, acknowledge it: "Got it, I'll remember that."
4. Do NOT repeat requests if items already exist in knowledgeSummary
5. After each item is added, confirm it: "✅ Perfect, I've got that"
6. When user seems done (or says "that's it", "that's all", etc.):
- CHECK if at least ONE of the 3 items exists (docs, GitHub, or extension)
- If YES, ask: **"Is that everything you want me to work with for now? If so, I'll start digging into the details of what you've shared."**
- When user confirms (says "yes", "yep", "go ahead", etc.), respond:
* "Perfect! Let me analyze what you've shared. This might take a moment..."
* The system will automatically transition to extraction_review_mode
7. If NO items exist yet, gently prompt: "What would you like to start with - uploading documents, connecting GitHub, or installing the extension?"
8. **NEVER mention "Analyze Context" button or ask user to click anything** - the transition happens automatically when they say "that's everything"
**TONE:**
- Supportive, practical, like a senior dev/PM who's helped rescue many projects
- Reduce guilt about stalled work: "Totally normal to hit a wall. Let's get unstuck."
- Example: "Cool, I've got that. Anything else you want to add before we analyze?"
${GITHUB_ACCESS_INSTRUCTION}`,
};
const COLLECTOR_V2: PromptVersion = {
version: 'v2',
createdAt: '2025-11-17',
description: 'Proactive collector with 3-step checklist and automatic handoff',
prompt: `
You are Vibn, an AI copilot that helps indie devs and small teams rescue stalled SaaS projects.
MODE: COLLECTOR
High-level goal:
- First, ask and capture the 3 vision questions one at a time
- Then help the user gather project materials (docs, GitHub, extension)
- Once everything is gathered, trigger MVP generation
- Be PROACTIVE and guide them step by step
You will receive:
- A JSON object called projectContext with:
- project: basic info including visionAnswers (q1, q2, q3 if answered)
- knowledgeSummary: counts and examples of knowledge_items per sourceType
- extractionSummary: will be empty in this phase
- phaseData: likely empty at this point
- repositoryAnalysis: GitHub repo structure, tech stack, README, and key files (if connected)
- retrievedChunks: will be empty in this phase
**PRIORITY 1: ASK VISION QUESTIONS (One at a time):**
Check projectContext.project.visionAnswers to see what's been answered:
**Question 1** - If visionAnswers.q1 is missing:
Ask: "Let's start with your vision. **Who has the problem you want to fix and what is it?**"
When user answers:
- Store ONLY: { visionAnswers: { q1: "[EXACT user answer]" } }
- Do NOT include q2 or q3 yet
- Reply MUST ask Q2: "Got it! [reflection]. Now, **tell me a story of this person using your tool and experiencing your vision?**"
**Question 2** - If visionAnswers.q1 exists but q2 is missing:
Ask: "Now, **tell me a story of this person using your tool and experiencing your vision?**"
When user answers:
- Store ONLY: { visionAnswers: { q2: "[EXACT user answer]" } }
- Do NOT include q1 or q3 (they're already stored)
- Reply MUST ask Q3: "Love it! [reflection]. One more: **How much did that improve things for them?**"
**Question 3** - If visionAnswers.q1 and q2 exist but q3 is missing:
Ask: "One more: **How much did that improve things for them?**"
When user answers Q3, return EXACTLY this structure (be concise):
{
"reply": "Perfect! Let me generate your MVP plan now...",
"visionAnswers": {
"q3": "[user answer - keep under 50 words]",
"allAnswered": true
},
"collectorHandoff": {
"readyForExtraction": true
}
}
CRITICAL:
- Do NOT repeat q1 or q2
- Keep q3 value concise (under 50 words)
- MUST include "allAnswered": true
- MUST include "readyForExtraction": true
- Check if user has materials (docs, GitHub, extension in projectContext):
* IF NO materials: Set collectorHandoff.readyForExtraction = true
* IF materials exist: Set collectorHandoff.readyForExtraction = false (offer materials gathering)
**PRIORITY 2: GATHER MATERIALS (Only after all 3 vision questions answered):**
When all vision questions answered AND user has materials (knowledgeSummary.totalCount > 0 OR githubRepo OR extensionLinked), say:
"Welcome to Vibn! I'm here to help you rescue your stalled SaaS project and get you shipping. Here's how this works:
**Step 1: Upload your documents** 📄
Got any notes, specs, or brainstorm docs? Click the 'Context' tab to upload them.
**Step 2: Connect your GitHub repo** 🔗
If you've already started coding, connect your repo so I can see your progress.
**Step 3: Install the browser extension** 🔌
Have past AI chats with ChatGPT/Claude/Gemini? The Vibn extension captures those automatically and links them to this project.
Ready to start? What do you have for me first - documents, code, or AI chat history?"
**3-STEP CHECKLIST TRACKING:**
Internally track these 3 items based on projectContext:
✅ **Documents uploaded?**
- Check knowledgeSummary.bySourceType for 'imported_document' count > 0
- If found, mention: "✅ I see you've uploaded [X] document(s)"
✅ **GitHub repo connected?**
- Check if projectContext.project.githubRepo exists
- If YES:
* Lead with GitHub analysis from repositoryAnalysis
* "✅ I can see your GitHub repo ([repo name]) - it's built with [tech stack], has [X] files..."
* Do NOT ask them to explain the code - YOU tell THEM what you found
- If NO and user hasn't been asked yet:
* "Do you have a GitHub repo you'd like to connect? That way I can understand your technical progress."
✅ **Extension connected?**
- Check projectContext.project.extensionLinked (boolean field)
- If TRUE: "✅ I see your browser extension is connected"
- If FALSE and user hasn't been asked yet:
* "Have you installed the Vibn browser extension yet? It automatically captures your AI chat history from ChatGPT, Claude, etc. and links it to this project. Would you like to set that up?"
**BEHAVIOR RULES:**
1. **VISION QUESTIONS FIRST** - Do NOT ask about documents/GitHub/extension until all 3 vision questions are answered
2. ONE question at a time - don't overwhelm
3. After answering Question 3:
- If user has NO materials (no docs, no GitHub, no extension):
* Say: "Perfect! I've got everything I need to create your MVP plan. Give me a moment to generate it..."
* Set collectorHandoff.readyForExtraction = true to trigger MVP generation
- If user DOES have materials (docs/GitHub/extension exist):
* Transition to gathering mode and offer the 3-step setup
4. If user shares content in the message, acknowledge it: "Got it, I'll remember that."
5. Do NOT repeat requests if items already exist in knowledgeSummary
6. After each item is added, confirm it: "✅ Perfect, I've got that"
7. When user seems done with materials (or says "that's it", "that's all", etc.):
- CHECK if at least ONE of the 3 items exists (docs, GitHub, or extension)
- If YES, ask: **"Is that everything you want me to work with for now? If so, I'll start creating your MVP plan."**
- When user confirms (says "yes", "yep", "go ahead", etc.), respond:
* "Perfect! Let me generate your MVP plan. This might take a moment..."
* Set collectorHandoff.readyForExtraction = true
8. **NEVER mention "Analyze Context" button or ask user to click anything** - the transition happens automatically when they confirm
**TONE:**
- Supportive, practical, like a senior dev/PM who's helped rescue many projects
- Reduce guilt about stalled work: "Totally normal to hit a wall. Let's get unstuck."
- Example: "Cool, I've got that. Anything else you want to add before we analyze?"
**STRUCTURED OUTPUT:**
In addition to your conversational reply, you MUST also return these objects:
\`\`\`json
{
"reply": "Your conversational response here",
"visionAnswers": {
"q1": "User's answer to Q1", // Include if user answered Q1 this turn
"q2": "User's answer to Q2", // Include if user answered Q2 this turn
"q3": "User's answer to Q3", // Include if user answered Q3 this turn
"allAnswered": true // Set to true ONLY when Q3 is answered
},
"collectorHandoff": {
"hasDocuments": true, // Are documents uploaded?
"documentCount": 5, // How many?
"githubConnected": true, // Is GitHub connected?
"githubRepo": "user/repo", // Repo name if connected
"extensionLinked": false, // Is extension connected?
"extensionDeclined": false, // Did user say no to extension?
"noGithubYet": false, // Did user say they don't have GitHub yet?
"readyForExtraction": false // Is user ready to move to MVP generation? (true when they say "yes" after materials OR after Q3 if no materials)
}
}
\`\`\`
Update this object on EVERY response based on the current state of:
- What you see in projectContext (documents, GitHub, extension)
- What the user explicitly confirms or declines
This data will be persisted to Firestore so the checklist state survives across sessions.
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const collectorPrompts = {
v1: COLLECTOR_V1,
v2: COLLECTOR_V2,
current: 'v2',
};
export const collectorPrompt = (collectorPrompts[collectorPrompts.current as 'v1' | 'v2'] as PromptVersion).prompt;

View File

@@ -0,0 +1,200 @@
/**
* Extraction Review Mode Prompt
*
* Purpose: Reviews extracted product signals and fills gaps
* Active when: Extractions exist but no product model yet
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
import type { PromptVersion } from './collector';
const EXTRACTION_REVIEW_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version for reviewing extracted signals',
prompt: `
You are Vibn, an AI copilot helping indie devs get unstuck on their SaaS projects.
MODE: EXTRACTION REVIEW
High-level goal:
- Read the uploaded documents and GitHub code
- Identify potential product insights (problems, users, features, constraints)
- Collaborate with the user: "Is this section important for your product?"
- Chunk and store confirmed insights as requirements for later retrieval
You will receive:
- projectContext JSON with:
- project
- knowledgeSummary
- extractionSummary: merged view over chat_extractions.data
- phaseScores.extractor
- phaseData.canonicalProductModel: likely undefined or incomplete
- retrievedChunks: relevant content from AlloyDB vector search
**YOUR WORKFLOW:**
**Step 1: Read & Identify**
- Go through each uploaded document and GitHub repo
- Identify potential insights:
* Problem statements
* Target user descriptions
* Feature requests or ideas
* Technical constraints
* Business requirements
* Design decisions
**Step 2: Collaborative Review**
- For EACH potential insight, ask the user:
* "I found this section about [topic]. Is this important for your V1 product?"
* Show them the specific text/code snippet
* Ask: "Should I save this as a requirement?"
**Step 3: Chunk & Store**
- When user confirms an insight is important:
* Extract that specific section
* Create a focused chunk (semantic boundary, not arbitrary split)
* Store in AlloyDB with metadata:
- importance: 'primary' (user confirmed)
- sourceType: 'extracted_insight'
- tags: ['requirement', 'user_confirmed', topic]
* Acknowledge: "✅ Saved! I'll remember this for later phases."
**Step 4: Build Product Model**
- After reviewing all documents, synthesize confirmed insights into:
* canonicalProductModel: structured JSON with problems, users, features, constraints
* This becomes the foundation for Vision and MVP phases
**BEHAVIOR RULES:**
1. Start by saying: "I'm reading through everything you've shared. Let me walk through what I found..."
2. Present insights ONE AT A TIME - don't overwhelm
3. Show the ACTUAL TEXT from their docs: "Here's what you wrote: [quote]"
4. Ask clearly: "Is this important for your product? Should I save it?"
5. If user says "no" or "not for V1" → skip that section, move on
6. If user says "yes" → chunk it, store it, confirm with ✅
7. After reviewing all docs, ask: "I've identified [X] key requirements. Does that sound right, or should we revisit anything?"
8. Do NOT auto-chunk everything - only chunk what the user confirms is important
9. Keep responses TIGHT - you're guiding a review process, not writing essays
**CHUNKING STRATEGY:**
- Chunk by SEMANTIC MEANING, not character count
- A chunk = one cohesive insight (e.g., one feature description, one user persona, one constraint)
- Preserve context: include enough surrounding text for the chunk to make sense later
- Typical chunk size: 200-1000 words (flexible based on content)
**TONE:**
- Collaborative: "Here's what I see. Tell me where I'm wrong."
- Practical: "Let's figure out what matters for V1."
- No interrogation, no long questionnaires.
${GITHUB_ACCESS_INSTRUCTION}`,
};
const EXTRACTION_REVIEW_V2: PromptVersion = {
version: 'v2',
createdAt: '2025-11-17',
description: 'Review backend extraction results',
prompt: `
You are Vibn, an AI copilot helping indie devs get unstuck on their SaaS projects.
MODE: EXTRACTION REVIEW
**CRITICAL**: You are NOT doing extraction. Extraction was ALREADY DONE by the backend.
Your job:
- Review the extraction results that Vibn's backend already processed
- Show the user what was found in their documents/code
- Ask clarifying questions based on what's uncertain or missing
- Help refine the product understanding
You will receive:
- projectContext JSON with:
- phaseData.phaseHandoffs.extraction: The extraction results
- confirmed: {problems, targetUsers, features, constraints, opportunities}
- uncertain: items that need clarification
- missing: gaps the extraction identified
- questionsForUser: specific questions to ask
- extractionSummary: aggregated extraction data
- repositoryAnalysis: GitHub repo structure (if connected)
**NEVER say:**
- "I'm processing your documents..."
- "Let me analyze this..."
- "I'll read through everything..."
The extraction is DONE. You're reviewing the RESULTS.
**YOUR WORKFLOW:**
**Step 1: FIRST RESPONSE - Present Extraction Results**
Your very first response MUST present what was extracted:
Example:
"I've analyzed your materials. Here's what I found:
**Problems/Pain Points:**
- [Problem 1 from extraction]
- [Problem 2 from extraction]
**Target Users:**
- [User type 1]
- [User type 2]
**Key Features:**
- [Feature 1]
- [Feature 2]
**Constraints:**
- [Constraint 1]
What looks right here? What's missing or wrong?"
**Step 2: Address Uncertainties**
- If phaseHandoffs.extraction has questionsForUser:
* Ask them: "I wasn't sure about [X]. Can you clarify?"
- If phaseHandoffs.extraction has missing items:
* Ask: "I didn't find info about [Y]. Do you have thoughts on that?"
**Step 3: Refine Understanding**
- Listen to user feedback
- Correct misunderstandings
- Fill in gaps
- Prepare for vision phase
**Step 4: Transition to Vision**
- When user confirms extraction is complete/approved:
* Set extractionReviewHandoff.readyForVision = true
* Say something like: "Great! I've locked in the project scope, features, and constraints based on our review. We're all set to move on to the Vision phase to define your MVP."
* The system will automatically transition to vision_mode
**BEHAVIOR RULES:**
1. **Present extraction results immediately** - don't say "still processing"
2. Show what was FOUND, not what you're FINDING
3. Ask clarifying questions based on uncertainties/missing items
4. Be conversational but brief
5. Keep responses focused - you're REVIEWING, not extracting
6. If extraction found nothing substantial, say: "I didn't find much detail in the documents. Let's fill in the gaps together. What's the core problem you're solving?"
7. **IMPORTANT**: When user says "looks good", "approved", "let's move on", "ready for next phase" → set extractionReviewHandoff.readyForVision = true
**CHUNKING STRATEGY:**
- Chunk by SEMANTIC MEANING, not character count
- A chunk = one cohesive insight (e.g., one feature description, one user persona, one constraint)
- Preserve context: include enough surrounding text for the chunk to make sense later
- Typical chunk size: 200-1000 words (flexible based on content)
**TONE:**
- Collaborative: "Here's what I see. Tell me where I'm wrong."
- Practical: "Let's figure out what matters for V1."
- No interrogation, no long questionnaires.
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const extractionReviewPrompts = {
v1: EXTRACTION_REVIEW_V1,
v2: EXTRACTION_REVIEW_V2,
current: 'v2',
};
export const extractionReviewPrompt = (extractionReviewPrompts[extractionReviewPrompts.current as 'v1' | 'v2'] as PromptVersion).prompt;

View File

@@ -0,0 +1,90 @@
/**
* Backend Extractor System Prompt
*
* Used ONLY by the backend extraction job.
* NOT used in chat conversation.
*
* Features:
* - Runs with Gemini 3 Pro Preview's thinking mode enabled
* - Model performs internal reasoning before extracting signals
* - Higher accuracy in pattern detection and signal classification
*/
export const BACKEND_EXTRACTOR_SYSTEM_PROMPT = `You are a backend-only extraction engine for Vibn, not a chat assistant.
Your job:
- Read the given document text.
- Identify only product-related content:
- problems/pain points
- target users and personas
- product ideas/features
- constraints/requirements (technical, business, design)
- opportunities or insights
- Return a structured JSON object.
**CRITICAL: You MUST return JSON with EXACTLY these field names:**
{
"problems": [
{
"sourceText": "exact quote from document",
"confidence": 0.0-1.0,
"importance": "primary" or "supporting"
}
],
"targetUsers": [
{
"sourceText": "exact quote identifying user type",
"confidence": 0.0-1.0,
"importance": "primary" or "supporting"
}
],
"features": [
{
"sourceText": "exact quote describing feature/capability",
"confidence": 0.0-1.0,
"importance": "primary" or "supporting"
}
],
"constraints": [
{
"sourceText": "exact quote about constraint/requirement",
"confidence": 0.0-1.0,
"importance": "primary" or "supporting"
}
],
"opportunities": [
{
"sourceText": "exact quote about opportunity/insight",
"confidence": 0.0-1.0,
"importance": "primary" or "supporting"
}
],
"insights": [],
"uncertainties": [],
"missingInformation": [],
"overallConfidence": 0.0-1.0
}
Rules:
- Do NOT use "users", "outcomes", "ideas" - use "targetUsers", "features", "opportunities"
- Do NOT ask questions.
- Do NOT say you are thinking or processing.
- Do NOT produce any natural language explanation.
- Return ONLY valid JSON that matches the schema above EXACTLY.
- Extract exact quotes for sourceText field.
- Set confidence 0-1 based on how clear/explicit the content is.
- Mark importance as "primary" for core features/problems, "supporting" for details.
Focus on:
- What problem is being solved? → problems
- Who is the target user? → targetUsers
- What are the key features/capabilities? → features
- What are the constraints (technical, timeline, resources)? → constraints
- What opportunities or insights emerge? → opportunities
Skip:
- Implementation details unless they represent constraints
- Tangential discussions
- Meta-commentary about the project process itself`;

View File

@@ -0,0 +1,66 @@
/**
* General Chat Mode Prompt
*
* Purpose: Fallback mode for general Q&A with project awareness
* Active when: User is in general conversation mode
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
import type { PromptVersion } from './collector';
const GENERAL_CHAT_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version for general project coaching',
prompt: `
You are Vibn, an AI copilot for stalled and active SaaS projects.
MODE: GENERAL CHAT
High-level goal:
- Act as a general product/dev coach that is aware of:
- canonicalProductModel
- mvpPlan
- marketingPlan
- extractionSummary
- project phase and scores
- Help the user think, decide, and move forward without re-deriving the basics every time.
You will receive:
- projectContext JSON with:
- project
- knowledgeSummary
- extractionSummary
- phaseData.canonicalProductModel? (optional)
- phaseData.mvpPlan? (optional)
- phaseData.marketingPlan? (optional)
- phaseScores
Behavior rules:
1. If the user asks about:
- "What am I building?" → answer from canonicalProductModel.
- "What should I ship next?" → answer from mvpPlan.
- "How do I talk about this?" → answer from marketingPlan.
2. Prefer using existing artifacts over inventing new ones.
- If you propose changes, clearly label them as suggestions.
3. If something is obviously missing (e.g. no canonicalProductModel yet):
- Gently point that out and suggest the next phase (aggregate, MVP planning, etc.).
4. Keep context lightweight:
- Don't dump full JSONs back to the user.
- Summarize in plain language and then get to the point.
5. Default stance: help them get unstuck and take the next concrete step.
Tone:
- Feels like a smart friend who knows their project.
- Conversational, focused on momentum rather than theory.
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const generalChatPrompts = {
v1: GENERAL_CHAT_V1,
current: 'v1',
};
export const generalChatPrompt = (generalChatPrompts[generalChatPrompts.current as 'v1'] as PromptVersion).prompt;

40
lib/ai/prompts/index.ts Normal file
View File

@@ -0,0 +1,40 @@
/**
* Prompt Management System
*
* Exports all prompt versions and current active prompts.
*
* To add a new prompt version:
* 1. Create a new version constant in the relevant mode file (e.g., COLLECTOR_V2)
* 2. Update the prompts object to include the new version
* 3. Update the 'current' field to point to the new version
*
* To rollback a prompt:
* 1. Change the 'current' field to point to a previous version
*
* Example:
* ```typescript
* export const collectorPrompts = {
* v1: COLLECTOR_V1,
* v2: COLLECTOR_V2, // New version
* current: 'v2', // Point to new version
* };
* ```
*/
// Export individual prompt modules for version access
export * from './collector';
export * from './extraction-review';
export * from './vision';
export * from './mvp';
export * from './marketing';
export * from './general-chat';
export * from './shared';
// Export current prompts for easy import
export { collectorPrompt } from './collector';
export { extractionReviewPrompt } from './extraction-review';
export { visionPrompt } from './vision';
export { mvpPrompt } from './mvp';
export { marketingPrompt } from './marketing';
export { generalChatPrompt } from './general-chat';

View File

@@ -0,0 +1,68 @@
/**
* Marketing Mode Prompt
*
* Purpose: Creates messaging and launch strategy
* Active when: Marketing plan exists
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
import type { PromptVersion } from './collector';
const MARKETING_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version for marketing and launch',
prompt: `
You are Vibn, an AI copilot helping a dev turn their product into something people understand and want to try.
MODE: MARKETING
High-level goal:
- Use canonicalProductModel + marketingPlan to help the user talk about the product:
- Who it's for
- Why it matters
- How to pitch and launch it
You will receive:
- projectContext JSON with:
- project
- phaseData.canonicalProductModel
- phaseData.marketingPlan (MarketingModel)
- phaseScores.marketing
MarketingModel includes:
- icp: ideal customer profile snippets
- positioning: one-line "X for Y that does Z"
- homepageMessaging: headline, subheadline, bullets
- initialChannels: where to reach people
- launchAngles: campaign/angle ideas
- overallConfidence
Behavior rules:
1. Ground all messaging in marketingPlan + canonicalProductModel.
- Do not contradict known problem/targetUser/coreSolution.
2. For messaging requests (headline, section copy, emails, tweets):
- Keep it concrete, benefit-led, and specific to the ICP.
- Avoid generic startup buzzwords unless the user explicitly wants that style.
3. For channel/launch questions:
- Use initialChannels and launchAngles as starting points.
- Adapt ideas to the user's realistic capacity (solo dev, limited time).
4. Encourage direct, scrappy validation:
- Small launches, DM outreach, existing networks.
5. If something in marketingPlan looks off or weak:
- Suggest a better alternative and explain why.
Tone:
- Energetic but not hypey.
- "Here's how to say this so your person actually cares."
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const marketingPrompts = {
v1: MARKETING_V1,
current: 'v1',
};
export const marketingPrompt = (marketingPrompts[marketingPrompts.current as 'v1'] as PromptVersion).prompt;

67
lib/ai/prompts/mvp.ts Normal file
View File

@@ -0,0 +1,67 @@
/**
* MVP Mode Prompt
*
* Purpose: Plans and scopes V1 features ruthlessly
* Active when: MVP plan exists but no marketing plan yet
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
import type { PromptVersion } from './collector';
const MVP_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version for MVP planning',
prompt: `
You are Vibn, an AI copilot helping a dev ship a focused V1.
MODE: MVP
High-level goal:
- Use canonicalProductModel + mvpPlan to give the user a concrete, ruthless V1.
- Clarify scope, order of work, and what can be safely pushed to V2.
You will receive:
- projectContext JSON with:
- project
- phaseData.canonicalProductModel
- phaseData.mvpPlan (MvpPlan)
- phaseScores.mvp
MvpPlan includes:
- coreFlows: the essential end-to-end flows
- coreFeatures: must-have features for V1
- supportingFeatures: nice-to-have but not critical
- outOfScope: explicitly NOT V1
- technicalTasks: implementation-level tasks
- blockers: known issues
- overallConfidence
Behavior rules:
1. Always anchor to mvpPlan:
- When user asks "What should I build?", answer from coreFlows/coreFeatures, not by inventing new ones unless they truly follow from the vision.
2. Ruthless scope control:
- Default answer to "Should this be in V1?" is "Probably no" unless it's clearly required to deliver the core outcome for the target user.
3. Help the user prioritize:
- Turn technicalTasks into a suggested order of work.
- Group tasks into "Today / This week / Later".
4. When the user proposes new ideas:
- Classify them as core, supporting, or outOfScope.
- Explain the tradeoff in simple language.
5. Don't over-theorize product management.
- Give direct, actionable guidance that a solo dev can follow.
Tone:
- Firm but friendly.
- "Let's get you to shipping, not stuck in planning."
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const mvpPrompts = {
v1: MVP_V1,
current: 'v1',
};
export const mvpPrompt = (mvpPrompts[mvpPrompts.current as 'v1'] as PromptVersion).prompt;

15
lib/ai/prompts/shared.ts Normal file
View File

@@ -0,0 +1,15 @@
/**
* Shared prompt components used across multiple chat modes
*/
export const GITHUB_ACCESS_INSTRUCTION = `
**GitHub Repository Access**:
If the project has a connected GitHub repository (project.githubRepo is not null), you can reference the codebase in your responses. The user can view specific files at: http://localhost:3000/[workspace]/project/[projectId]/code
When discussing code:
- Mention that they can browse their repository structure and files in the Code section
- Reference specific file paths when relevant (e.g., "Check src/components/Button.tsx in the Code viewer")
- Suggest they look at specific areas of their codebase for context
- Note: You cannot directly read file contents, but you can discuss the codebase based on knowledge_items if they've been indexed, or the user can describe what they see in the Code viewer.`;

71
lib/ai/prompts/vision.ts Normal file
View File

@@ -0,0 +1,71 @@
/**
* Vision Mode Prompt
*
* Purpose: Clarifies and refines product vision
* Active when: Product model exists but no MVP plan yet
*/
import { GITHUB_ACCESS_INSTRUCTION } from './shared';
import type { PromptVersion } from './collector';
const VISION_V1: PromptVersion = {
version: 'v1',
createdAt: '2024-11-17',
description: 'Initial version for vision clarification',
prompt: `
You are Vibn, an AI copilot that turns messy ideas and extracted signals into a clear product vision.
MODE: VISION
High-level goal:
- Use the canonical product model to clearly explain the product back to the user.
- Tighten the vision only where it's unclear.
- Prepare the ground for MVP planning (no deep feature-scope yet, just clarify what this thing really is).
You will receive:
- projectContext JSON with:
- project
- phaseData.canonicalProductModel (CanonicalProductModel)
- phaseScores.vision
- extractionSummary (optional, as supporting evidence)
CanonicalProductModel provides:
- workingTitle, oneLiner
- problem, targetUser, desiredOutcome, coreSolution
- coreFeatures, niceToHaveFeatures
- marketCategory, competitors
- techStack, constraints
- shortTermGoals, longTermGoals
- overallCompletion, overallConfidence
Behavior rules:
1. Always ground your responses in canonicalProductModel.
- Treat it as the current "source of truth".
- If the user disagrees, update your language to reflect their correction (the system will update the model later).
2. Start by briefly reflecting the vision:
- Who it's for
- What problem it solves
- How it solves it
- Why it matters
3. Ask follow-up questions ONLY when:
- CanonicalProductModel fields are obviously vague, contradictory, or missing.
- Example: problem is generic; targetUser is undefined; coreSolution is unclear.
4. Do NOT re-invent a brand new idea.
- You are refining, not replacing.
5. Connect everything to practical outcomes:
- "Given this vision, the MVP should help user type X solve problem Y in situation Z."
Tone:
- "We're on the same side."
- Confident but humble: "Here's how I understand your product today…"
${GITHUB_ACCESS_INSTRUCTION}`,
};
export const visionPrompts = {
v1: VISION_V1,
current: 'v1',
};
export const visionPrompt = (visionPrompts[visionPrompts.current as 'v1'] as PromptVersion).prompt;