feat: add /generate endpoint for structured one-shot LLM calls

Made-with: Cursor
This commit is contained in:
2026-03-03 21:11:19 -08:00
parent 6185e71578
commit d2ee04db59

View File

@@ -11,7 +11,7 @@ import { ToolContext } from './tools';
import { PROTECTED_GITEA_REPOS } from './tools/security';
import { orchestratorChat, listSessions, clearSession } from './orchestrator';
import { atlasChat, listAtlasSessions, clearAtlasSession } from './atlas';
import { LLMMessage } from './llm';
import { LLMMessage, createLLM } from './llm';
// Express application instance shared by every route registered below.
const app = express();
// cors() with default options — presumably intended to allow all origins;
// NOTE(review): confirm whether an origin allow-list is wanted in production.
app.use(cors());
@@ -343,6 +343,27 @@ app.post('/webhook/gitea', (req: Request, res: Response) => {
});
});
// ---------------------------------------------------------------------------
// Generate — thin structured-generation endpoint (no session, no system prompt)
// Use this for one-shot tasks like architecture recommendations.
// ---------------------------------------------------------------------------
app.post('/generate', async (req: Request, res: Response) => {
const { prompt, model } = req.body as { prompt?: string; model?: string };
if (!prompt) { res.status(400).json({ error: '"prompt" is required' }); return; }
try {
const llm = createLLM(model ?? 'A', { temperature: 0.3 });
const messages: import('./llm').LLMMessage[] = [
{ role: 'user', content: prompt }
];
const response = await llm.chat(messages, [], 8192);
res.json({ reply: response.content ?? '' });
} catch (err) {
res.status(500).json({ error: err instanceof Error ? err.message : String(err) });
}
});
// ---------------------------------------------------------------------------
// Error handler
// ---------------------------------------------------------------------------