VIBN Frontend for Coolify deployment
This commit is contained in:
43
lib/ai/llm-client.ts
Normal file
43
lib/ai/llm-client.ts
Normal file
@@ -0,0 +1,43 @@
import type { ZodType, ZodTypeDef } from 'zod';
export type LlmModel = 'gemini' | 'gpt' | 'sonnet';
|
||||
|
||||
export interface LlmMessage {
|
||||
role: 'system' | 'user' | 'assistant';
|
||||
content: string;
|
||||
}
|
||||
|
||||
export interface ThinkingConfig {
|
||||
/**
|
||||
* Thinking level for Gemini 3 models
|
||||
* - 'low': Minimizes latency and cost (for simple tasks)
|
||||
* - 'high': Maximizes reasoning depth (for complex tasks, default)
|
||||
*/
|
||||
thinking_level?: 'low' | 'high';
|
||||
|
||||
/**
|
||||
* Whether to include thoughts in the response
|
||||
* Useful for debugging/understanding model reasoning
|
||||
*/
|
||||
include_thoughts?: boolean;
|
||||
}
|
||||
|
||||
export interface StructuredCallArgs<TOutput> {
|
||||
model: LlmModel;
|
||||
systemPrompt: string;
|
||||
messages: LlmMessage[];
|
||||
schema: ZodType<TOutput, ZodTypeDef, any>;
|
||||
temperature?: number;
|
||||
|
||||
/**
|
||||
* Gemini 3 thinking configuration
|
||||
* Enables internal reasoning before responding
|
||||
*/
|
||||
thinking_config?: ThinkingConfig;
|
||||
}
|
||||
|
||||
export interface LlmClient {
|
||||
structuredCall<TOutput>(args: StructuredCallArgs<TOutput>): Promise<TOutput>;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user