feat: migrate Gemini from Vertex AI to Google AI Studio API key
- gemini-client.ts: replaces Vertex AI REST + service-account auth with direct generativelanguage.googleapis.com calls using GOOGLE_API_KEY. Removes all Firebase credential setup code.
- summarize/route.ts: same migration, simplified to a single fetch call.
- No longer depends on the gen-lang-client-0980079410 GCP project for AI calls.

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
@@ -1,15 +1,8 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { GoogleGenAI } from "@google/genai";
|
||||
|
||||
const VERTEX_AI_MODEL = process.env.VERTEX_AI_MODEL || 'gemini-3-pro-preview';
|
||||
const VERTEX_PROJECT_ID = process.env.VERTEX_AI_PROJECT_ID || 'gen-lang-client-0980079410';
|
||||
const VERTEX_LOCATION = process.env.VERTEX_AI_LOCATION || 'global';
|
||||
|
||||
const genAI = new GoogleGenAI({
|
||||
project: VERTEX_PROJECT_ID,
|
||||
location: VERTEX_LOCATION,
|
||||
vertexai: true,
|
||||
});
|
||||
const MODEL = process.env.GEMINI_MODEL || 'gemini-2.0-flash-exp';
|
||||
const API_KEY = process.env.GOOGLE_API_KEY || '';
|
||||
const GEMINI_URL = `https://generativelanguage.googleapis.com/v1beta/models/${MODEL}:generateContent`;
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
@@ -19,10 +12,9 @@ export async function POST(request: NextRequest) {
|
||||
return NextResponse.json({ error: "Content is required" }, { status: 400 });
|
||||
}
|
||||
|
||||
// Truncate content if it's too long (Gemini has token limits)
|
||||
const maxContentLength = 30000; // ~30k characters
|
||||
const truncatedContent = content.length > maxContentLength
|
||||
? content.substring(0, maxContentLength) + "..."
|
||||
const maxContentLength = 30000;
|
||||
const truncatedContent = content.length > maxContentLength
|
||||
? content.substring(0, maxContentLength) + "..."
|
||||
: content;
|
||||
|
||||
const prompt = `Read this document titled "${title}" and provide a concise 1-2 sentence summary that captures the main topic and key points. Be specific and actionable.
|
||||
@@ -32,27 +24,27 @@ ${truncatedContent}
|
||||
|
||||
Summary:`;
|
||||
|
||||
const result = await genAI.models.generateContent({
|
||||
model: VERTEX_AI_MODEL,
|
||||
contents: [{
|
||||
role: 'user',
|
||||
parts: [{ text: prompt }],
|
||||
}],
|
||||
config: {
|
||||
temperature: 0.3, // Lower temperature for consistent summaries
|
||||
},
|
||||
const response = await fetch(`${GEMINI_URL}?key=${API_KEY}`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
contents: [{ role: 'user', parts: [{ text: prompt }] }],
|
||||
generationConfig: { temperature: 0.3 },
|
||||
}),
|
||||
});
|
||||
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Gemini API error (${response.status}): ${await response.text()}`);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const summary = result.candidates?.[0]?.content?.parts?.[0]?.text?.trim() || 'Summary unavailable';
|
||||
|
||||
return NextResponse.json({ summary });
|
||||
} catch (error) {
|
||||
console.error("Error generating summary:", error);
|
||||
return NextResponse.json(
|
||||
{
|
||||
error: "Failed to generate summary",
|
||||
details: error instanceof Error ? error.message : String(error),
|
||||
},
|
||||
{ error: "Failed to generate summary", details: error instanceof Error ? error.message : String(error) },
|
||||
{ status: 500 }
|
||||
);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user