// Next.js route handler: imports an AI chat transcript into a project's knowledge base.
import { NextResponse } from 'next/server';
import { getAdminDb } from '@/lib/firebase/admin';
import { createKnowledgeItem } from '@/lib/server/knowledge';
import type { KnowledgeSourceMeta } from '@/lib/types/knowledge';
const PROVIDER_MAP = new Set(['chatgpt', 'gemini', 'claude', 'cursor', 'vibn', 'other']);
|
|
|
|
interface ImportAiChatRequest {
|
|
title?: string;
|
|
provider?: string;
|
|
transcript?: string;
|
|
sourceLink?: string | null;
|
|
createdAtOriginal?: string | null;
|
|
}
|
|
|
|
export async function POST(
|
|
request: Request,
|
|
{ params }: { params: Promise<{ projectId: string }> },
|
|
) {
|
|
try {
|
|
const { projectId } = await params;
|
|
|
|
if (!projectId) {
|
|
return NextResponse.json({ error: 'Missing projectId' }, { status: 400 });
|
|
}
|
|
|
|
const body = (await request.json()) as ImportAiChatRequest;
|
|
const transcript = body.transcript?.trim();
|
|
const provider = body.provider?.toLowerCase();
|
|
|
|
if (!transcript) {
|
|
return NextResponse.json({ error: 'transcript is required' }, { status: 400 });
|
|
}
|
|
|
|
const adminDb = getAdminDb();
|
|
const projectSnap = await adminDb.collection('projects').doc(projectId).get();
|
|
if (!projectSnap.exists) {
|
|
return NextResponse.json({ error: 'Project not found' }, { status: 404 });
|
|
}
|
|
|
|
const origin = PROVIDER_MAP.has(provider ?? '') ? provider : 'other';
|
|
|
|
const sourceMeta: KnowledgeSourceMeta = {
|
|
origin: (origin as KnowledgeSourceMeta['origin']) ?? 'other',
|
|
url: body.sourceLink ?? null,
|
|
filename: body.title ?? null,
|
|
createdAtOriginal: body.createdAtOriginal ?? null,
|
|
importance: 'primary',
|
|
tags: ['ai_chat'],
|
|
};
|
|
|
|
const knowledgeItem = await createKnowledgeItem({
|
|
projectId,
|
|
sourceType: 'imported_ai_chat',
|
|
title: body.title ?? null,
|
|
content: transcript,
|
|
sourceMeta,
|
|
});
|
|
|
|
// Chunk and embed in background (don't block response)
|
|
// This populates AlloyDB knowledge_chunks for vector search
|
|
(async () => {
|
|
try {
|
|
const { writeKnowledgeChunksForItem } = await import('@/lib/server/vector-memory');
|
|
await writeKnowledgeChunksForItem({
|
|
id: knowledgeItem.id,
|
|
projectId: knowledgeItem.projectId,
|
|
content: knowledgeItem.content,
|
|
sourceMeta: knowledgeItem.sourceMeta,
|
|
});
|
|
} catch (error) {
|
|
// Log but don't fail the request
|
|
console.error('[import-ai-chat] Failed to chunk/embed knowledge_item:', error);
|
|
}
|
|
})();
|
|
|
|
return NextResponse.json({ knowledgeItem });
|
|
} catch (error) {
|
|
console.error('[import-ai-chat] Failed to import chat', error);
|
|
return NextResponse.json(
|
|
{
|
|
error: 'Failed to import AI chat transcript',
|
|
details: error instanceof Error ? error.message : String(error),
|
|
},
|
|
{ status: 500 },
|
|
);
|
|
}
|
|
}
|
|
|
|
|