VIBN Frontend for Coolify deployment

2026-02-15 19:25:52 -08:00
commit 40bf8428cd
398 changed files with 76513 additions and 0 deletions

lib/ai/chat-extractor.ts Normal file

@@ -0,0 +1,42 @@
import type { LlmClient } from '@/lib/ai/llm-client';
import { ChatExtractionSchema } from '@/lib/ai/chat-extraction-types';
import type { ChatExtractionData } from '@/lib/ai/chat-extraction-types';
import type { KnowledgeItem } from '@/lib/types/knowledge';
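
// System prompt: forces grounded extraction and schema-exact JSON output with no prose.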
const SYSTEM_PROMPT = `
You are the Product Chat Signal Extractor for stalled SaaS projects.
- Read the provided transcript carefully.
- Extract grounded signals about the product, market, users, execution status, and unknowns.
- Never invent data. Use "null" or empty arrays when the transcript lacks information.
- Respond with valid JSON that matches the provided schema exactly. Do not include prose or code fences.
`.trim();
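
/**
 * Runs one structured LLM call over a chat transcript and returns
 * product/market/execution signals conforming to ChatExtractionSchema.
 */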
export async function runChatExtraction(
  knowledgeItem: KnowledgeItem,
  llm: LlmClient,
): Promise<ChatExtractionData> {
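  // Trim stray whitespace so the delimiters below sit flush against the transcript.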
  const transcript = knowledgeItem.content.trim();

  const userMessage = `
You will analyze the following transcript. Use message references when listing evidence (e.g., msg_1).
Focus on actionable product-building insights.
TRANSCRIPT_START
${transcript}
TRANSCRIPT_END`.trim();
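
  // Schema-constrained call; low temperature keeps extraction output stable.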
  return llm.structuredCall<ChatExtractionData>({
    model: 'gemini',
    systemPrompt: SYSTEM_PROMPT,
    messages: [
      {
        role: 'user',
        content: userMessage,
      },
    ],
    schema: ChatExtractionSchema,
    temperature: 0.2,
  });
}
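
Reviewer note: a minimal, hedged sketch of how this extractor might be wired up. `extractFromTranscript` and the literal `KnowledgeItem` value below are illustrative placeholders, not part of this commit; only `runChatExtraction` and the imported types are real.

```ts
import { runChatExtraction } from '@/lib/ai/chat-extractor';
import type { KnowledgeItem } from '@/lib/types/knowledge';
import type { LlmClient } from '@/lib/ai/llm-client';

// Hypothetical caller: assumes KnowledgeItem carries the raw transcript in `content`.
async function extractFromTranscript(llm: LlmClient) {
  const item = {
    id: 'ki_1',
    content: 'msg_1: We shipped the MVP in March.\nmsg_2: Churn spiked after the pricing change.',
  } as KnowledgeItem; // cast: the full KnowledgeItem shape is not shown in this diff

  const signals = await runChatExtraction(item, llm);
  console.log(signals); // ChatExtractionData shaped per ChatExtractionSchema
}
```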