vibn-frontend/lib/ai/llm-client.ts

import type { ZodType, ZodTypeDef } from 'zod';

/** Supported model families. */
export type LlmModel = 'gemini' | 'gpt' | 'sonnet';

/** A single message in the conversation history. */
export interface LlmMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

export interface ThinkingConfig {
  /**
   * Thinking level for Gemini 3 models.
   * - 'low': minimizes latency and cost (for simple tasks)
   * - 'high': maximizes reasoning depth (for complex tasks, default)
   */
  thinking_level?: 'low' | 'high';
  /**
   * Whether to include thoughts in the response.
   * Useful for debugging/understanding model reasoning.
   */
  include_thoughts?: boolean;
}

export interface StructuredCallArgs<TOutput> {
  model: LlmModel;
  systemPrompt: string;
  messages: LlmMessage[];
  /** Zod schema the model output is validated against. */
  schema: ZodType<TOutput, ZodTypeDef, any>;
  temperature?: number;
  /**
   * Gemini 3 thinking configuration.
   * Enables internal reasoning before responding.
   */
  thinking_config?: ThinkingConfig;
}

export interface LlmClient {
  /** Calls the model and returns output parsed and validated against the schema. */
  structuredCall<TOutput>(args: StructuredCallArgs<TOutput>): Promise<TOutput>;
}
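
A minimal usage sketch, assuming a concrete LlmClient implementation obtained from a hypothetical createLlmClient() factory (not part of this file); the zod calls and the types above are the only APIs taken from the source:

import { z } from 'zod';
import { createLlmClient } from './llm-client-factory'; // hypothetical factory returning an LlmClient

// Schema describing the structured output expected from the model.
const summarySchema = z.object({
  title: z.string(),
  bulletPoints: z.array(z.string()),
});

type Summary = z.infer<typeof summarySchema>;

async function summarize(text: string): Promise<Summary> {
  const client = createLlmClient();

  // structuredCall resolves with an object already validated against summarySchema.
  return client.structuredCall<Summary>({
    model: 'gemini',
    systemPrompt: 'Summarize the user message as structured JSON.',
    messages: [{ role: 'user', content: text }],
    schema: summarySchema,
    temperature: 0.2,
    thinking_config: { thinking_level: 'low' },
  });
}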