deploy: current vibn theia state
Some checks failed
Playwright Tests / Playwright Tests (ubuntu-22.04, Node.js 22.x) (push) Has been cancelled
3PP License Check / 3PP License Check (11, 22.x, ubuntu-22.04) (push) Has been cancelled
Publish packages to NPM / Perform Publishing (push) Has been cancelled

Made-with: Cursor
This commit is contained in:
2026-02-27 12:01:08 -08:00
commit 8bb5110148
3782 changed files with 640947 additions and 0 deletions

View File

@@ -0,0 +1,10 @@
/** @type {import('eslint').Linter.Config} */
// ESLint configuration for the @theia/ai-google package.
module.exports = {
    extends: [
        // Shared build-time lint rules used across Theia extension packages.
        '../../configs/build.eslintrc.json'
    ],
    parserOptions: {
        // Resolve the TypeScript project configuration relative to this package directory.
        tsconfigRootDir: __dirname,
        project: 'tsconfig.json'
    }
};

View File

@@ -0,0 +1,33 @@
<div align='center'>
<br />
<img src='https://raw.githubusercontent.com/eclipse-theia/theia/master/logo/theia.svg?sanitize=true' alt='theia-ext-logo' width='100px' />
<h2>ECLIPSE THEIA - AI GOOGLE EXTENSION</h2>
<hr />
</div>

## Description
The `@theia/ai-google` integrates Google's models with Theia AI.
The Google API key and the models to use can be configured via preferences.
Alternatively the API key can also be handed in via the `GOOGLE_API_KEY` environment variable.

## Additional Information
- [API documentation for `@theia/ai-google`](https://eclipse-theia.github.io/theia/docs/next/modules/_theia_ai-google.html)
- [Theia - GitHub](https://github.com/eclipse-theia/theia)
- [Theia - Website](https://theia-ide.org/)

## License
- [Eclipse Public License 2.0](http://www.eclipse.org/legal/epl-2.0/)
- [(Secondary) GNU General Public License, version 2 with the GNU Classpath Exception](https://projects.eclipse.org/license/secondary-gpl-2.0-cp)

## Trademark
"Theia" is a trademark of the Eclipse Foundation
<https://www.eclipse.org/theia>

View File

@@ -0,0 +1,52 @@
{
"name": "@theia/ai-google",
"version": "1.68.0",
"description": "Theia - Google AI Integration",
"dependencies": {
"@google/genai": "^1.30.0",
"@theia/ai-core": "1.68.0",
"@theia/core": "1.68.0"
},
"publishConfig": {
"access": "public"
},
"theiaExtensions": [
{
"frontend": "lib/browser/google-frontend-module",
"backend": "lib/node/google-backend-module"
}
],
"keywords": [
"theia-extension",
"ai",
"google",
"gemini"
],
"license": "EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0",
"repository": {
"type": "git",
"url": "https://github.com/eclipse-theia/theia.git"
},
"bugs": {
"url": "https://github.com/eclipse-theia/theia/issues"
},
"homepage": "https://github.com/eclipse-theia/theia",
"files": [
"lib",
"src"
],
"scripts": {
"build": "theiaext build",
"clean": "theiaext clean",
"compile": "theiaext compile",
"lint": "theiaext lint",
"test": "theiaext test",
"watch": "theiaext watch"
},
"devDependencies": {
"@theia/ext-scripts": "1.68.0"
},
"nyc": {
"extends": "../../configs/nyc.json"
}
}

View File

@@ -0,0 +1,100 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import { FrontendApplicationContribution } from '@theia/core/lib/browser';
import { inject, injectable } from '@theia/core/shared/inversify';
import { GoogleLanguageModelsManager, GoogleModelDescription } from '../common';
import { API_KEY_PREF, MODELS_PREF, MAX_RETRIES, RETRY_DELAY_OTHER_ERRORS, RETRY_DELAY_RATE_LIMIT } from '../common/google-preferences';
import { PreferenceService } from '@theia/core';
const GOOGLE_PROVIDER_ID = 'google';

@injectable()
export class GoogleFrontendApplicationContribution implements FrontendApplicationContribution {

    @inject(PreferenceService)
    protected preferenceService: PreferenceService;

    @inject(GoogleLanguageModelsManager)
    protected manager: GoogleLanguageModelsManager;

    /**
     * Model ids from the last applied models preference. Used to diff against
     * the new value on preference changes and to re-register all models when
     * the API key changes.
     */
    protected prevModels: string[] = [];

    /**
     * Pushes the initial Google AI preference values to the manager once the
     * preference service is ready, and keeps the manager in sync afterwards.
     */
    onStart(): void {
        this.preferenceService.ready.then(() => {
            const apiKey = this.preferenceService.get<string>(API_KEY_PREF, undefined);
            this.manager.setApiKey(apiKey);
            this.manager.setMaxRetriesOnErrors(this.preferenceService.get<number>(MAX_RETRIES, 3));
            this.manager.setRetryDelayOnRateLimitError(this.preferenceService.get<number>(RETRY_DELAY_RATE_LIMIT, 60));
            this.manager.setRetryDelayOnOtherErrors(this.preferenceService.get<number>(RETRY_DELAY_OTHER_ERRORS, -1));

            const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
            this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createGeminiModelDescription(modelId)));
            // Defensive copy: never alias the array returned by the preference service.
            this.prevModels = [...models];

            this.preferenceService.onPreferenceChanged(event => {
                if (event.preferenceName === API_KEY_PREF) {
                    const newApiKey = this.preferenceService.get<string>(API_KEY_PREF, undefined);
                    this.manager.setApiKey(newApiKey);
                    this.handleKeyChange(newApiKey);
                } else if (event.preferenceName === MAX_RETRIES) {
                    this.manager.setMaxRetriesOnErrors(this.preferenceService.get<number>(MAX_RETRIES, 3));
                } else if (event.preferenceName === RETRY_DELAY_RATE_LIMIT) {
                    this.manager.setRetryDelayOnRateLimitError(this.preferenceService.get<number>(RETRY_DELAY_RATE_LIMIT, 60));
                } else if (event.preferenceName === RETRY_DELAY_OTHER_ERRORS) {
                    this.manager.setRetryDelayOnOtherErrors(this.preferenceService.get<number>(RETRY_DELAY_OTHER_ERRORS, -1));
                } else if (event.preferenceName === MODELS_PREF) {
                    this.handleModelChanges(this.preferenceService.get<string[]>(MODELS_PREF, []));
                }
            });
        });
    }

    /**
     * Called when the API key changes. Updates all Google models on the manager to ensure the new key is used.
     */
    protected handleKeyChange(newApiKey: string | undefined): void {
        if (this.prevModels && this.prevModels.length > 0) {
            this.manager.createOrUpdateLanguageModels(...this.prevModels.map(modelId => this.createGeminiModelDescription(modelId)));
        }
    }

    /**
     * Diffs the new models preference value against the previously registered
     * models and removes/creates only the models that actually changed.
     */
    protected handleModelChanges(newModels: string[]): void {
        const oldModels = new Set(this.prevModels);
        const updatedModels = new Set(newModels);

        const modelsToRemove = [...oldModels].filter(model => !updatedModels.has(model));
        const modelsToAdd = [...updatedModels].filter(model => !oldModels.has(model));

        this.manager.removeLanguageModels(...modelsToRemove.map(model => `${GOOGLE_PROVIDER_ID}/${model}`));
        this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(modelId => this.createGeminiModelDescription(modelId)));
        // Copy instead of aliasing the preference value (consistent with onStart).
        this.prevModels = [...newModels];
    }

    /**
     * Builds the model description for a Gemini model id, prefixing the
     * UI-facing id with the provider id. 'apiKey: true' means the globally
     * configured key is used.
     */
    protected createGeminiModelDescription(modelId: string): GoogleModelDescription {
        const id = `${GOOGLE_PROVIDER_ID}/${modelId}`;
        const description: GoogleModelDescription = {
            id: id,
            model: modelId,
            apiKey: true,
            enableStreaming: true
        };
        return description;
    }
}

View File

@@ -0,0 +1,32 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import { ContainerModule } from '@theia/core/shared/inversify';
import { GooglePreferencesSchema } from '../common/google-preferences';
import { FrontendApplicationContribution, RemoteConnectionProvider, ServiceConnectionProvider } from '@theia/core/lib/browser';
import { GoogleFrontendApplicationContribution } from './google-frontend-application-contribution';
import { GOOGLE_LANGUAGE_MODELS_MANAGER_PATH, GoogleLanguageModelsManager } from '../common';
import { PreferenceContribution } from '@theia/core';
export default new ContainerModule(bind => {
bind(PreferenceContribution).toConstantValue({ schema: GooglePreferencesSchema });
bind(GoogleFrontendApplicationContribution).toSelf().inSingletonScope();
bind(FrontendApplicationContribution).toService(GoogleFrontendApplicationContribution);
bind(GoogleLanguageModelsManager).toDynamicValue(ctx => {
const provider = ctx.container.get<ServiceConnectionProvider>(RemoteConnectionProvider);
return provider.createProxy<GoogleLanguageModelsManager>(GOOGLE_LANGUAGE_MODELS_MANAGER_PATH);
}).inSingletonScope();
});

View File

@@ -0,0 +1,50 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
/** RPC service path under which the backend manager is published to the frontend. */
export const GOOGLE_LANGUAGE_MODELS_MANAGER_PATH = '/services/google/language-model-manager';
/** DI service identifier for the GoogleLanguageModelsManager. */
export const GoogleLanguageModelsManager = Symbol('GoogleLanguageModelsManager');

/**
 * Description of a single Google (Gemini) language model to register with Theia AI.
 */
export interface GoogleModelDescription {
    /**
     * The identifier of the model which will be shown in the UI.
     */
    id: string;
    /**
     * The model ID as used by the Google Gemini API.
     */
    model: string;
    /**
     * The key for the model. If 'true' is provided the global Gemini API key will be used.
     */
    apiKey: string | true | undefined;
    /**
     * Indicate whether the streaming API shall be used.
     */
    enableStreaming: boolean;
    /**
     * Maximum number of tokens to generate. Default is 4096.
     */
    maxTokens?: number;
}

/**
 * Manages the Google language models registered with Theia AI.
 * Implemented in the backend; the frontend accesses it via an RPC proxy
 * under GOOGLE_LANGUAGE_MODELS_MANAGER_PATH.
 */
export interface GoogleLanguageModelsManager {
    /** The currently effective global Gemini API key, if any. */
    apiKey: string | undefined;
    /** Sets the global API key used by models whose description has apiKey === true. */
    setApiKey(key: string | undefined): void;
    /** Sets the maximum number of retries on request errors. */
    setMaxRetriesOnErrors(maxRetries: number): void;
    /** Sets the delay (in seconds) between retries after rate limit (429) errors. */
    setRetryDelayOnRateLimitError(retryDelay: number): void;
    /** Sets the delay (in seconds) between retries after other errors. */
    setRetryDelayOnOtherErrors(retryDelay: number): void;
    /** Creates new language models or updates existing ones for the given descriptions. */
    createOrUpdateLanguageModels(...models: GoogleModelDescription[]): Promise<void>;
    /** Removes the language models with the given (provider-prefixed) ids. */
    removeLanguageModels(...modelIds: string[]): void
}

View File

@@ -0,0 +1,71 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import { AI_CORE_PREFERENCES_TITLE } from '@theia/ai-core/lib/common/ai-core-preferences';
import { nls, PreferenceSchema } from '@theia/core';
/** Preference id for the Google AI (Gemini) API key. */
export const API_KEY_PREF = 'ai-features.google.apiKey';
/** Preference id for the list of Gemini model ids to register. */
export const MODELS_PREF = 'ai-features.google.models';
/** Preference id for the maximum number of retries on request errors. */
export const MAX_RETRIES = 'ai-features.google.maxRetriesOnErrors';
/** Preference id for the retry delay (seconds) after rate limit (429) errors. */
export const RETRY_DELAY_RATE_LIMIT = 'ai-features.google.retryDelayOnRateLimitError';
/** Preference id for the retry delay (seconds) after other errors; -1 disables these retries. */
export const RETRY_DELAY_OTHER_ERRORS = 'ai-features.google.retryDelayOnOtherErrors';

/**
 * Preference schema for the Google AI integration; contributed as a
 * PreferenceContribution by both the frontend and the backend module.
 */
export const GooglePreferencesSchema: PreferenceSchema = {
    properties: {
        [API_KEY_PREF]: {
            type: 'string',
            markdownDescription: nls.localize('theia/ai/google/apiKey/description',
                'Enter an API Key of your official Google AI (Gemini) Account. **Please note:** By using this preference the GOOGLE AI API key will be stored in clear text\
on the machine running Theia. Use the environment variable `GOOGLE_API_KEY` to set the key securely.'),
            title: AI_CORE_PREFERENCES_TITLE,
        },
        [MODELS_PREF]: {
            type: 'array',
            description: nls.localize('theia/ai/google/models/description', 'Official Google Gemini models to use'),
            title: AI_CORE_PREFERENCES_TITLE,
            default: ['gemini-3-pro-preview', 'gemini-3-flash-preview', 'gemini-2.5-pro', 'gemini-2.5-flash'],
            items: {
                type: 'string'
            }
        },
        [MAX_RETRIES]: {
            type: 'integer',
            description: nls.localize('theia/ai/google/maxRetriesOnErrors/description',
                'Maximum number of retries in case of errors. If smaller than 1, then the retry logic is disabled'),
            title: AI_CORE_PREFERENCES_TITLE,
            default: 3,
            minimum: 0
        },
        [RETRY_DELAY_RATE_LIMIT]: {
            type: 'number',
            description: nls.localize('theia/ai/google/retryDelayOnRateLimitError/description',
                'Delay in seconds between retries in case of rate limit errors. See https://ai.google.dev/gemini-api/docs/rate-limits'),
            title: AI_CORE_PREFERENCES_TITLE,
            default: 60,
            minimum: 0
        },
        [RETRY_DELAY_OTHER_ERRORS]: {
            type: 'number',
            description: nls.localize('theia/ai/google/retryDelayOnOtherErrors/description',
                'Delay in seconds between retries in case of other errors (sometimes the Google GenAI reports errors such as incomplete JSON syntax returned from the model \
or 500 Internal Server Error). Setting this to -1 prevents retries in these cases. Otherwise a retry happens either immediately (if set to 0) or after \
this delay in seconds (if set to a positive number).'),
            title: AI_CORE_PREFERENCES_TITLE,
            default: -1,
            minimum: -1
        }
    }
};

View File

@@ -0,0 +1,16 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
// Re-export the shared protocol types, DI symbol and service path for frontend and backend consumers.
export * from './google-language-models-manager';

View File

@@ -0,0 +1,36 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import { ContainerModule } from '@theia/core/shared/inversify';
import { GOOGLE_LANGUAGE_MODELS_MANAGER_PATH, GoogleLanguageModelsManager } from '../common/google-language-models-manager';
import { ConnectionHandler, PreferenceContribution, RpcConnectionHandler } from '@theia/core';
import { GoogleLanguageModelsManagerImpl } from './google-language-models-manager-impl';
import { ConnectionContainerModule } from '@theia/core/lib/node/messaging/connection-container-module';
import { GooglePreferencesSchema } from '../common/google-preferences';
// AI services are bound per frontend connection so every client gets its own
// manager instance (see ConnectionContainerModule).
const googleConnectionModule = ConnectionContainerModule.create(({ bind }) => {
    bind(GoogleLanguageModelsManagerImpl).toSelf().inSingletonScope();
    bind(GoogleLanguageModelsManager).toService(GoogleLanguageModelsManagerImpl);
    // Expose the manager over RPC so the frontend proxy can reach it.
    bind(ConnectionHandler).toDynamicValue(({ container }) =>
        new RpcConnectionHandler(GOOGLE_LANGUAGE_MODELS_MANAGER_PATH, () => container.get(GoogleLanguageModelsManager))
    ).inSingletonScope();
});

export default new ContainerModule(bind => {
    // Contribute the Google AI preference schema to the backend preference system.
    bind(PreferenceContribution).toConstantValue({ schema: GooglePreferencesSchema });
    bind(ConnectionContainerModule).toConstantValue(googleConnectionModule);
});

View File

@@ -0,0 +1,524 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import {
createToolCallError,
ImageContent,
LanguageModel,
LanguageModelMessage,
LanguageModelRequest,
LanguageModelResponse,
LanguageModelStatus,
LanguageModelStreamResponse,
LanguageModelStreamResponsePart,
LanguageModelTextResponse,
TokenUsageService,
ToolCallResult,
ToolInvocationContext,
UserRequest
} from '@theia/ai-core';
import { CancellationToken } from '@theia/core';
import { GoogleGenAI, FunctionCallingConfigMode, FunctionDeclaration, Content, Schema, Part, Modality, FunctionResponse, ToolConfig } from '@google/genai';
import { wait } from '@theia/core/lib/common/promise-util';
import { GoogleLanguageModelRetrySettings } from './google-language-models-manager-impl';
import { UUID } from '@theia/core/shared/@lumino/coreutils';
/**
 * Tracks one in-flight Gemini function call while a streaming response is consumed.
 */
interface ToolCallback {
    /** The tool/function name reported by the model. */
    readonly name: string;
    /** Stable call id (model-provided, or generated when Gemini omits one). */
    readonly id: string;
    /** JSON-serialized arguments; updated as streamed chunks refine the call. */
    args: string;
}
/**
 * Normalizes a tool call result into the shape required by Gemini's
 * FunctionResponse: the API expects an object, so arrays and primitives are
 * wrapped under a 'result' key and undefined becomes an empty object.
 */
function toFunctionResponse(content: ToolCallResult): FunctionResponse['response'] {
    const isPlainObject = content !== undefined && typeof content === 'object' && !Array.isArray(content);
    if (isPlainObject) {
        return content as FunctionResponse['response'];
    }
    return content === undefined ? {} : { result: content };
}
/**
 * Converts a single Theia language model message into Gemini request parts.
 * Returns undefined for messages that have no Gemini representation
 * (e.g. empty text messages or images that are not base64-encoded); callers skip those.
 */
const convertMessageToPart = (message: LanguageModelMessage): Part[] | undefined => {
    if (LanguageModelMessage.isTextMessage(message) && message.text.length > 0) {
        return [{ text: message.text }];
    } else if (LanguageModelMessage.isToolUseMessage(message)) {
        return [{
            functionCall: {
                id: message.id, name: message.name, args: message.input as Record<string, unknown>
            },
            // Replay the thought signature (if present) so Gemini can resume its reasoning context.
            thoughtSignature: message.data?.thoughtSignature,
        }];
    } else if (LanguageModelMessage.isToolResultMessage(message)) {
        return [{ functionResponse: { name: message.name, response: toFunctionResponse(message.content) } }];
    } else if (LanguageModelMessage.isThinkingMessage(message)) {
        return [{ thought: true, text: message.thinking }];
    } else if (LanguageModelMessage.isImageMessage(message) && ImageContent.isBase64(message.image)) {
        return [{ inlineData: { data: message.image.base64data, mimeType: message.image.mimeType } }];
    }
    // Explicit return for unsupported message kinds (satisfies noImplicitReturns).
    return undefined;
};
/**
 * Transforms Theia language model messages into the Gemini API format.
 * Consecutive messages that map to the same Gemini role are merged into a
 * single Content entry.
 * @param messages Array of LanguageModelMessage to transform
 * @returns Object containing transformed messages and optional system message
 */
function transformToGeminiMessages(
    messages: readonly LanguageModelMessage[]
): { contents: Content[]; systemMessage?: string } {
    // The system message (if any) is passed to Gemini separately via systemInstruction.
    const systemCandidate = messages.find(message => message.actor === 'system');
    const systemMessage = systemCandidate && LanguageModelMessage.isTextMessage(systemCandidate) && systemCandidate.text || undefined;

    const contents: Content[] = [];
    for (const message of messages) {
        if (message.actor === 'system') {
            // Already extracted above.
            continue;
        }
        const messageParts = convertMessageToPart(message);
        if (messageParts === undefined) {
            continue;
        }
        const role = toGoogleRole(message);
        const previous = contents[contents.length - 1];
        if (previous !== undefined && previous.role === role) {
            // Same role as the previous entry: merge parts instead of opening a new turn.
            previous.parts?.push(...messageParts);
        } else {
            contents.push({ role, parts: messageParts });
        }
    }
    return { contents, systemMessage };
}
/** DI symbol associated with Google models (consumers are defined elsewhere). */
export const GoogleModelIdentifier = Symbol('GoogleModelIdentifier');
/**
 * Maps a Theia message actor onto the role vocabulary understood by Gemini.
 * @param message The message whose actor is inspected
 * @returns 'model' for AI-authored messages, 'user' for everything else
 */
function toGoogleRole(message: LanguageModelMessage): 'user' | 'model' {
    return message.actor === 'ai' ? 'model' : 'user';
}
/**
 * Implements the Gemini language model integration for Theia.
 * Supports streaming and non-streaming requests, function (tool) calling with
 * recursive continuation, thought summaries, token usage reporting and a
 * configurable retry mechanism.
 */
export class GoogleModel implements LanguageModel {

    /**
     * @param id the identifier of the model shown in the UI
     * @param model the model name as used by the Gemini API
     * @param status the current status of the model
     * @param enableStreaming whether the streaming API shall be used
     * @param apiKey supplier for the API key, re-read on every request
     * @param retrySettings supplier for the retry configuration, re-read on every request
     * @param tokenUsageService optional service used to record token usage
     */
    constructor(
        public readonly id: string,
        public model: string,
        public status: LanguageModelStatus,
        public enableStreaming: boolean,
        public apiKey: () => string | undefined,
        public retrySettings: () => GoogleLanguageModelRetrySettings,
        protected readonly tokenUsageService?: TokenUsageService
    ) { }

    /** Returns the request-specific settings, falling back to an empty object. */
    protected getSettings(request: LanguageModelRequest): Readonly<Record<string, unknown>> {
        return request.settings ?? {};
    }

    /**
     * Sends the request to the Gemini API, dispatching to the streaming or
     * non-streaming handler depending on enableStreaming.
     * @throws if the request contains no messages or the API key is missing
     */
    async request(request: UserRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
        if (!request.messages?.length) {
            throw new Error('Request must contain at least one message');
        }
        const genAI = this.initializeGemini();
        try {
            if (this.enableStreaming) {
                return this.handleStreamingRequest(genAI, request, cancellationToken);
            }
            return this.handleNonStreamingRequest(genAI, request);
        } catch (error) {
            // NOTE(review): the promises above are returned without await, so this
            // catch only covers synchronous failures; async rejections propagate to the caller.
            const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
            throw new Error(`Gemini API request failed: ${errorMessage}`);
        }
    }

    /**
     * Performs a streaming request and returns an async iterator over response parts.
     * Executes tool calls requested by the model and recursively continues the
     * conversation with the tool results until the model stops calling tools.
     * @param toolMessages accumulated tool round-trip messages from previous recursion steps
     */
    protected async handleStreamingRequest(
        genAI: GoogleGenAI,
        request: UserRequest,
        cancellationToken?: CancellationToken,
        toolMessages?: Content[]
    ): Promise<LanguageModelStreamResponse> {
        const settings = this.getSettings(request);
        const { contents: parts, systemMessage } = transformToGeminiMessages(request.messages);
        const functionDeclarations = this.createFunctionDeclarations(request);
        const toolConfig: ToolConfig = {};
        if (functionDeclarations.length > 0) {
            toolConfig.functionCallingConfig = {
                mode: FunctionCallingConfigMode.AUTO,
            };
        }
        // Wrap the API call in the retry mechanism
        const stream = await this.withRetry(async () =>
            genAI.models.generateContentStream({
                model: this.model,
                config: {
                    systemInstruction: systemMessage,
                    toolConfig,
                    responseModalities: [Modality.TEXT],
                    ...(functionDeclarations.length > 0 && {
                        tools: [{
                            functionDeclarations
                        }]
                    }),
                    thinkingConfig: {
                        // https://ai.google.dev/gemini-api/docs/thinking#summaries
                        includeThoughts: true,
                    },
                    temperature: 1,
                    ...settings
                },
                // Append the tool round-trip messages when continuing after tool calls.
                contents: [...parts, ...(toolMessages ?? [])]
            }));
        // Capture 'this' for use inside the generator object below.
        const that = this;
        const asyncIterator = {
            async *[Symbol.asyncIterator](): AsyncIterator<LanguageModelStreamResponsePart> {
                // Tool calls seen in this stream, keyed by call id.
                const toolCallMap: { [key: string]: ToolCallback } = {};
                // All parts of the model turn; replayed (minus thoughts) when continuing after tool calls.
                const collectedParts: Part[] = [];
                try {
                    for await (const chunk of stream) {
                        if (cancellationToken?.isCancellationRequested) {
                            break;
                        }
                        const finishReason = chunk.candidates?.[0].finishReason;
                        if (finishReason) {
                            switch (finishReason) {
                                // 'STOP' is the only valid (non-error) finishReason
                                // "Natural stop point of the model or provided stop sequence."
                                case 'STOP':
                                    break;
                                // MALFORMED_FUNCTION_CALL: The model produced a malformed function call.
                                // Log warning but continue - there might still be usable text content.
                                case 'MALFORMED_FUNCTION_CALL':
                                    console.warn('Gemini returned MALFORMED_FUNCTION_CALL finish reason.', {
                                        finishReason,
                                        candidate: chunk.candidates?.[0],
                                        content: chunk.candidates?.[0]?.content,
                                        parts: chunk.candidates?.[0]?.content?.parts,
                                        text: chunk.text,
                                        usageMetadata: chunk.usageMetadata
                                    });
                                    break;
                                // All other reasons are error-cases. Throw an Error.
                                // e.g. SAFETY, MAX_TOKENS, RECITATION, LANGUAGE, ...
                                // https://ai.google.dev/api/generate-content#FinishReason
                                default:
                                    console.error('Gemini streaming ended with unexpected finish reason:', {
                                        finishReason,
                                        candidate: chunk.candidates?.[0],
                                        content: chunk.candidates?.[0]?.content,
                                        parts: chunk.candidates?.[0]?.content?.parts,
                                        safetyRatings: chunk.candidates?.[0]?.safetyRatings,
                                        text: chunk.text,
                                        usageMetadata: chunk.usageMetadata
                                    });
                                    throw new Error(`Unexpected finish reason: ${finishReason}`);
                            }
                        }
                        // Handle thinking, text content, and function calls from parts
                        if (chunk.candidates?.[0]?.content?.parts) {
                            for (const part of chunk.candidates[0].content.parts) {
                                collectedParts.push(part);
                                if (part.text) {
                                    if (part.thought) {
                                        yield { thought: part.text, signature: part.thoughtSignature ?? '' };
                                    } else {
                                        yield { content: part.text };
                                    }
                                } else if (part.functionCall) {
                                    const functionCall = part.functionCall;
                                    // Gemini does not always provide a function call ID (unlike Anthropic/OpenAI).
                                    // We need a stable ID to track calls in toolCallMap and correlate results.
                                    const callId = functionCall.id ?? UUID.uuid4().replace(/-/g, '');
                                    let toolCall = toolCallMap[callId];
                                    if (toolCall === undefined) {
                                        // First chunk for this call: announce an unfinished tool call.
                                        toolCall = {
                                            name: functionCall.name ?? '',
                                            args: functionCall.args ? JSON.stringify(functionCall.args) : '{}',
                                            id: callId,
                                        };
                                        toolCallMap[callId] = toolCall;
                                        yield {
                                            tool_calls: [{
                                                finished: false,
                                                id: toolCall.id,
                                                function: {
                                                    name: toolCall.name,
                                                    arguments: toolCall.args
                                                },
                                                data: part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : undefined
                                            }]
                                        };
                                    } else {
                                        // Update to existing tool call
                                        toolCall.args = functionCall.args ? JSON.stringify(functionCall.args) : '{}';
                                        yield {
                                            tool_calls: [{
                                                function: {
                                                    arguments: toolCall.args
                                                },
                                                data: part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : undefined
                                            }]
                                        };
                                    }
                                }
                            }
                        } else if (chunk.text) {
                            yield { content: chunk.text };
                        }
                        // Report token usage if available
                        if (chunk.usageMetadata && that.tokenUsageService && that.id) {
                            const promptTokens = chunk.usageMetadata.promptTokenCount;
                            const completionTokens = chunk.usageMetadata.candidatesTokenCount;
                            if (promptTokens && completionTokens) {
                                // Fire-and-forget: recording failures are logged, not propagated.
                                that.tokenUsageService.recordTokenUsage(that.id, {
                                    inputTokens: promptTokens,
                                    outputTokens: completionTokens,
                                    requestId: request.requestId
                                }).catch(error => console.error('Error recording token usage:', error));
                            }
                        }
                    }
                    // Process tool calls if any exist
                    const toolCalls = Object.values(toolCallMap);
                    if (toolCalls.length > 0) {
                        // Collect tool results
                        const toolResult = await Promise.all(toolCalls.map(async tc => {
                            const tool = request.tools?.find(t => t.name === tc.name);
                            let result;
                            if (!tool) {
                                result = createToolCallError(`Tool '${tc.name}' not found in the available tools for this request.`, 'tool-not-available');
                            } else {
                                try {
                                    result = await tool.handler(tc.args, ToolInvocationContext.create(tc.id));
                                } catch (e) {
                                    console.error(`Error executing tool ${tc.name}:`, e);
                                    result = createToolCallError(e.message || 'Tool execution failed');
                                }
                            }
                            return {
                                name: tc.name,
                                result: result,
                                id: tc.id,
                                arguments: tc.args,
                            };
                        }));
                        // Generate tool call responses
                        const calls = toolResult.map(tr => ({
                            finished: true,
                            id: tr.id,
                            result: tr.result,
                            function: { name: tr.name, arguments: tr.arguments },
                        }));
                        yield { tool_calls: calls };
                        // Format tool responses for Gemini
                        // According to Gemini docs, functionResponse needs name and response
                        const toolResponses: Part[] = toolResult.map(call => ({
                            functionResponse: {
                                name: call.name,
                                response: toFunctionResponse(call.result)
                            }
                        }));
                        const responseMessage: Content = { role: 'user', parts: toolResponses };
                        // Build the model's response content from collected parts
                        // Exclude thinking parts as they should not be included in the conversation history sent back to the model
                        const modelResponseParts = collectedParts.filter(p => !p.thought);
                        const modelContent: Content = { role: 'model', parts: modelResponseParts };
                        const messages = [...(toolMessages ?? []), modelContent, responseMessage];
                        // Continue the conversation with tool results
                        const continuedResponse = await that.handleStreamingRequest(
                            genAI,
                            request,
                            cancellationToken,
                            messages
                        );
                        // Stream the continued response
                        for await (const nestedEvent of continuedResponse.stream) {
                            yield nestedEvent;
                        }
                    }
                } catch (e) {
                    console.error('Error in Gemini streaming:', e);
                    throw e;
                }
            },
        };
        return { stream: asyncIterator };
    }

    /**
     * Maps the request's tools to Gemini FunctionDeclaration objects.
     * Parameters are omitted when the tool declares no properties.
     */
    private createFunctionDeclarations(request: LanguageModelRequest): FunctionDeclaration[] {
        if (!request.tools || request.tools.length === 0) {
            return [];
        }
        return request.tools.map(tool => ({
            name: tool.name,
            description: tool.description,
            parameters: (tool.parameters && Object.keys(tool.parameters.properties).length !== 0) ? tool.parameters as Schema : undefined
        }));
    }

    /**
     * Performs a non-streaming request and returns the concatenated text parts
     * of the first candidate (falling back to the response's aggregate text).
     */
    protected async handleNonStreamingRequest(
        genAI: GoogleGenAI,
        request: UserRequest
    ): Promise<LanguageModelTextResponse> {
        const settings = this.getSettings(request);
        const { contents: parts, systemMessage } = transformToGeminiMessages(request.messages);
        const functionDeclarations = this.createFunctionDeclarations(request);
        // Wrap the API call in the retry mechanism
        const model = await this.withRetry(async () => genAI.models.generateContent({
            model: this.model,
            config: {
                systemInstruction: systemMessage,
                toolConfig: {
                    functionCallingConfig: {
                        mode: FunctionCallingConfigMode.AUTO,
                    }
                },
                ...(functionDeclarations.length > 0 && {
                    tools: [{ functionDeclarations }]
                }),
                ...settings
            },
            contents: parts
        }));
        try {
            let responseText = '';
            // For non streaming requests we are always only interested in text parts
            if (model.candidates?.[0]?.content?.parts) {
                for (const part of model.candidates[0].content.parts) {
                    if (part.text) {
                        responseText += part.text;
                    }
                }
            } else {
                responseText = model.text ?? '';
            }
            // Record token usage if available
            if (model.usageMetadata && this.tokenUsageService) {
                const promptTokens = model.usageMetadata.promptTokenCount;
                const completionTokens = model.usageMetadata.candidatesTokenCount;
                if (promptTokens && completionTokens) {
                    await this.tokenUsageService.recordTokenUsage(this.id, {
                        inputTokens: promptTokens,
                        outputTokens: completionTokens,
                        requestId: request.requestId
                    });
                }
            }
            return { text: responseText };
        } catch (error) {
            throw new Error(`Failed to get response from Gemini API: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }

    /**
     * Creates the Gemini client using the configured API key.
     * @throws if no API key is available
     */
    protected initializeGemini(): GoogleGenAI {
        const apiKey = this.apiKey();
        if (!apiKey) {
            throw new Error('Please provide GOOGLE_API_KEY in preferences or via environment variable');
        }
        // TODO test vertexai
        return new GoogleGenAI({ apiKey, vertexai: false });
    }

    /**
     * Implements a retry mechanism for the handle(non)Streaming request functions.
     * The retry configuration is re-read from this.retrySettings on each invocation;
     * negative delay settings disable retries for the corresponding error class.
     * @param fn the wrapped function to which the retry logic should be applied.
     * @returns the result of the wrapped function.
     */
    private async withRetry<T>(fn: () => Promise<T>): Promise<T> {
        const { maxRetriesOnErrors, retryDelayOnRateLimitError, retryDelayOnOtherErrors } = this.retrySettings();
        for (let i = 0; i <= maxRetriesOnErrors; i++) {
            try {
                return await fn();
            } catch (error) {
                if (i === maxRetriesOnErrors) {
                    // no retries left - throw the original error
                    throw error;
                }
                const message = (error as Error).message;
                // Check for rate limit exhaustion (usually, there is a rate limit per minute, so we can retry after a delay...)
                if (message && message.includes('429 Too Many Requests')) {
                    if (retryDelayOnRateLimitError < 0) {
                        // rate limit error should not retried because of the setting
                        throw error;
                    }
                    const delayMs = retryDelayOnRateLimitError * 1000;
                    console.warn(`Received 429 (Too Many Requests). Retrying in ${retryDelayOnRateLimitError}s. Attempt ${i + 1} of ${maxRetriesOnErrors}.`);
                    await wait(delayMs);
                } else if (retryDelayOnOtherErrors < 0) {
                    // Other errors should not retried because of the setting
                    throw error;
                } else {
                    const delayMs = retryDelayOnOtherErrors * 1000;
                    console.warn(`Request failed: ${message}. Retrying in ${retryDelayOnOtherErrors}s. Attempt ${i + 1} of ${maxRetriesOnErrors}.`);
                    await wait(delayMs);
                }
                // -> reiterate the loop for the next attempt
            }
        }
        // This should not be reached
        throw new Error('Retry mechanism failed unexpectedly.');
    }
}

View File

@@ -0,0 +1,121 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
import { LanguageModelRegistry, LanguageModelStatus, TokenUsageService } from '@theia/ai-core';
import { inject, injectable } from '@theia/core/shared/inversify';
import { GoogleModel } from './google-language-model';
import { GoogleLanguageModelsManager, GoogleModelDescription } from '../common';
/**
 * Retry configuration applied to every request made through the Google language models.
 */
export interface GoogleLanguageModelRetrySettings {
    // Number of additional attempts after the initial request has failed.
    maxRetriesOnErrors: number;
    // Delay in seconds before retrying after an HTTP 429 (rate limit) error;
    // a negative value disables retrying for rate limit errors.
    retryDelayOnRateLimitError: number;
    // Delay in seconds before retrying after any other error;
    // a negative value disables retrying for those errors.
    retryDelayOnOtherErrors: number;
}
@injectable()
export class GoogleLanguageModelsManagerImpl implements GoogleLanguageModelsManager {

    protected _apiKey: string | undefined;

    // Mutable retry configuration; handed to each model via a provider closure so
    // later preference changes are observed by already-registered models.
    protected retrySettings: GoogleLanguageModelRetrySettings = {
        maxRetriesOnErrors: 3,
        retryDelayOnRateLimitError: 60,
        retryDelayOnOtherErrors: -1
    };

    @inject(LanguageModelRegistry)
    protected readonly languageModelRegistry: LanguageModelRegistry;

    @inject(TokenUsageService)
    protected readonly tokenUsageService: TokenUsageService;

    /**
     * The effective API key: an explicitly configured key takes precedence over
     * the GOOGLE_API_KEY and GEMINI_API_KEY environment variables.
     */
    get apiKey(): string | undefined {
        return this._apiKey ?? process.env.GOOGLE_API_KEY ?? process.env.GEMINI_API_KEY;
    }

    /** Maps the presence of an API key to a language model status. */
    protected calculateStatus(effectiveApiKey: string | undefined): LanguageModelStatus {
        if (effectiveApiKey) {
            return { status: 'ready' };
        }
        return { status: 'unavailable', message: 'No Google API key set' };
    }

    /**
     * Registers new Google models or patches already-registered ones in place.
     * Descriptions whose id resolves to a non-Google model are skipped with a warning.
     */
    async createOrUpdateLanguageModels(...modelDescriptions: GoogleModelDescription[]): Promise<void> {
        for (const description of modelDescriptions) {
            const existing = await this.languageModelRegistry.getLanguageModel(description.id);
            // apiKey === true means "use the globally configured key"; a string is
            // a model-specific key; anything falsy yields no key at all.
            const apiKeyProvider = (): string | undefined =>
                description.apiKey === true
                    ? this.apiKey
                    : description.apiKey || undefined;
            const retrySettingsProvider = (): GoogleLanguageModelRetrySettings => this.retrySettings;
            // Determine the effective API key for status
            const status = this.calculateStatus(apiKeyProvider());
            if (!existing) {
                this.languageModelRegistry.addLanguageModels([
                    new GoogleModel(
                        description.id,
                        description.model,
                        status,
                        description.enableStreaming,
                        apiKeyProvider,
                        retrySettingsProvider,
                        this.tokenUsageService
                    )
                ]);
                continue;
            }
            if (!(existing instanceof GoogleModel)) {
                console.warn(`Gemini: model ${description.id} is not a Gemini model`);
                continue;
            }
            await this.languageModelRegistry.patchLanguageModel<GoogleModel>(description.id, {
                model: description.model,
                enableStreaming: description.enableStreaming,
                apiKey: apiKeyProvider,
                retrySettings: retrySettingsProvider,
                status
            });
        }
    }

    /** Unregisters the given models from the language model registry. */
    removeLanguageModels(...modelIds: string[]): void {
        this.languageModelRegistry.removeLanguageModels(modelIds);
    }

    /** Stores the key from the preferences; falsy values clear the override. */
    setApiKey(apiKey: string | undefined): void {
        this._apiKey = apiKey ? apiKey : undefined;
    }

    setMaxRetriesOnErrors(maxRetries: number): void {
        this.retrySettings.maxRetriesOnErrors = maxRetries;
    }

    setRetryDelayOnRateLimitError(retryDelay: number): void {
        this.retrySettings.retryDelayOnRateLimitError = retryDelay;
    }

    setRetryDelayOnOtherErrors(retryDelay: number): void {
        this.retrySettings.retryDelayOnOtherErrors = retryDelay;
    }
}

View File

@@ -0,0 +1,28 @@
// *****************************************************************************
// Copyright (C) 2025 EclipseSource GmbH and others.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// http://www.eclipse.org/legal/epl-2.0.
//
// This Source Code may also be made available under the following Secondary
// Licenses when the conditions for such availability set forth in the Eclipse
// Public License v. 2.0 are satisfied: GNU General Public License, version 2
// with the GNU Classpath Exception which is available at
// https://www.gnu.org/software/classpath/license.html.
//
// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
// *****************************************************************************
/* note: this bogus test file is required so that
we are able to run mocha unit tests on this
package, without having any actual unit tests in it.
This way a coverage report will be generated,
showing 0% coverage, instead of no report.
This file can be removed once we have real unit
tests in place. */
// Placeholder suite: exists solely so mocha runs for this package and a
// coverage report is emitted even though no real unit tests exist yet.
describe('ai-google package', function (): void {
    it('support code coverage statistics', function (): boolean { return true; });
});

View File

@@ -0,0 +1,19 @@
{
"extends": "../../configs/base.tsconfig",
"compilerOptions": {
"composite": true,
"rootDir": "src",
"outDir": "lib"
},
"include": [
"src"
],
"references": [
{
"path": "../ai-core"
},
{
"path": "../core"
}
]
}