VIBN Frontend for Coolify deployment

This commit is contained in:
2026-02-15 19:25:52 -08:00
commit 40bf8428cd
398 changed files with 76513 additions and 0 deletions

43
lib/ai/llm-client.ts Normal file
View File

@@ -0,0 +1,43 @@
import type { ZodType, ZodTypeDef } from 'zod';
/** Identifier for the model family a request is routed to (provider selection happens in the implementation). */
export type LlmModel = 'gemini' | 'gpt' | 'sonnet';
/**
 * A single message in a chat-style LLM conversation.
 * Mirrors the common chat-completion message shape: a role tag plus text content.
 */
export interface LlmMessage {
  /** Author of the message: system instructions, end-user input, or a prior model reply. */
  role: 'system' | 'user' | 'assistant';
  /** Plain-text body of the message. */
  content: string;
}
/**
 * Configuration for a model's internal "thinking" (reasoning) phase.
 * NOTE(review): member docs reference Gemini 3 specifically — presumably ignored
 * by non-Gemini models; confirm against the implementing client.
 */
export interface ThinkingConfig {
  /**
   * Thinking level for Gemini 3 models
   * - 'low': Minimizes latency and cost (for simple tasks)
   * - 'high': Maximizes reasoning depth (for complex tasks, default)
   */
  thinking_level?: 'low' | 'high';
  /**
   * Whether to include thoughts in the response
   * Useful for debugging/understanding model reasoning
   */
  include_thoughts?: boolean;
}
/**
 * Arguments for a schema-validated ("structured output") LLM call.
 *
 * @typeParam TOutput - The shape the model's response is validated/parsed into.
 */
export interface StructuredCallArgs<TOutput> {
  /** Model family to send the request to. */
  model: LlmModel;
  /** System prompt for the conversation (kept separate from `messages`). */
  systemPrompt: string;
  /** Conversation history / user turns sent to the model. */
  messages: LlmMessage[];
  /**
   * Zod schema the model output must satisfy; the call resolves with the
   * parsed TOutput. The `any` input type is deliberate so schemas with any
   * concrete Input type remain assignable here.
   */
  schema: ZodType<TOutput, ZodTypeDef, any>;
  /** Sampling temperature; presumably falls back to a provider default when omitted — confirm in the implementation. */
  temperature?: number;
  /**
   * Gemini 3 thinking configuration
   * Enables internal reasoning before responding
   */
  thinking_config?: ThinkingConfig;
}
/**
 * Provider-agnostic LLM client contract for structured (schema-validated) calls.
 */
export interface LlmClient {
  /**
   * Executes a call described by `args` and resolves with the model output
   * parsed into TOutput (per the Zod schema in the args).
   *
   * NOTE(review): error behavior (validation failure, provider errors) is not
   * visible from this declaration — check implementations for what rejects.
   */
  structuredCall<TOutput>(args: StructuredCallArgs<TOutput>): Promise<TOutput>;
}